| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''lxmert'''
UpperCamelCase_ = {}
def __init__( self : Optional[Any] , UpperCAmelCase : Dict=3_0522 , UpperCAmelCase : int=768 , UpperCAmelCase : Optional[Any]=12 , UpperCAmelCase : Tuple=9500 , UpperCAmelCase : List[str]=1600 , UpperCAmelCase : Union[str, Any]=400 , UpperCAmelCase : Optional[int]=3072 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Tuple=512 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Optional[Any]=0.0_2 , UpperCAmelCase : int=1e-12 , UpperCAmelCase : int=9 , UpperCAmelCase : Dict=5 , UpperCAmelCase : Optional[Any]=5 , UpperCAmelCase : int=2048 , UpperCAmelCase : Dict=4 , UpperCAmelCase : int=6.6_7 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=True , **UpperCAmelCase : Tuple , ) -> List[str]:
'''simple docstring'''
lowercase : Dict =vocab_size
lowercase : Optional[int] =hidden_size
lowercase : int =num_attention_heads
lowercase : int =hidden_act
lowercase : List[Any] =intermediate_size
lowercase : str =hidden_dropout_prob
lowercase : Optional[Any] =attention_probs_dropout_prob
lowercase : Union[str, Any] =max_position_embeddings
lowercase : int =type_vocab_size
lowercase : Tuple =initializer_range
lowercase : Optional[Any] =layer_norm_eps
lowercase : Dict =num_qa_labels
lowercase : List[str] =num_object_labels
lowercase : Optional[Any] =num_attr_labels
lowercase : Tuple =l_layers
lowercase : Optional[Any] =x_layers
lowercase : List[str] =r_layers
lowercase : List[Any] =visual_feat_dim
lowercase : List[str] =visual_pos_dim
lowercase : Tuple =visual_loss_normalizer
lowercase : Optional[int] =task_matched
lowercase : Tuple =task_mask_lm
lowercase : Union[str, Any] =task_obj_predict
lowercase : List[Any] =task_qa
lowercase : Dict =visual_obj_loss
lowercase : int =visual_attr_loss
lowercase : List[Any] =visual_feat_loss
lowercase : Union[str, Any] ={'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**UpperCAmelCase )
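The row above is a style-transformed copy of transformers' `LxmertConfig` (the `self.` assignments are rewritten to bare `lowercase : ...` bindings, and literals are digit-grouped like `3_0522`). A minimal sketch of how the original class is used, assuming the standard `transformers` API:

```python
# Assumes the upstream `transformers` package; kwargs mirror the lxmert-base defaults above.
from transformers import LxmertConfig

config = LxmertConfig(hidden_size=768, num_attention_heads=12)
print(config.model_type)  # "lxmert"
```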
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ = '''LayoutLMv2ImageProcessor'''
UpperCamelCase_ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self : List[str] , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , **UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase , )
lowercase : Any =kwargs.pop('''feature_extractor''' )
lowercase : Dict =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Any , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowercase : Tuple =self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Optional[Any] =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase : List[str] =features['''words''']
lowercase : Optional[Any] =self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel values
lowercase : List[str] =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowercase : str =self.get_overflowing_images(UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
lowercase : Dict =images
return encoded_inputs
def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
'''simple docstring'''
lowercase : str =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}' )
return images_with_overflow
def A__ ( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
return self.image_processor_class
@property
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
return self.image_processor
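The sample above is a transformed copy of transformers' `LayoutXLMProcessor`, which chains a `LayoutLMv2ImageProcessor` (optionally running OCR) with a `LayoutXLMTokenizer`. A minimal usage sketch, assuming the upstream API, Pillow, and pytesseract for OCR; the input file name is hypothetical:

```python
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")  # hypothetical input file
encoding = processor(image, return_tensors="pt")   # with apply_ocr=True, words/boxes come from OCR
print(encoding.keys())  # input_ids, bbox, attention_mask, image
```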
| 8 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE = object()
def lowercase_ ( __A : str , __A : int ) -> Optional[Any]:
"""simple docstring"""
lowercase : str =tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__A ) - len(__A ) + 1 ):
lowercase : Optional[int] =[x.match(__A ) for x, y in zip(__A , ks[i:] )]
if matches and all(__A ):
return True
return False
def lowercase_ ( __A : Optional[int] ) -> Dict:
"""simple docstring"""
def replace(__A : str , __A : Tuple ):
for rule, replacement in rules:
if _match(__A , __A ):
return replacement
return val
return replace
def lowercase_ ( ) -> Dict:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , __A )),
(("transformer", "wte", "embedding"), P('''mp''' , __A )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__A , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , __A )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__A , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , __A )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowercase_ ( __A : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : int =_get_partition_rules()
lowercase : Optional[int] =_replacement_rules(__A )
lowercase : Dict ={k: _unmatched for k in flatten_dict(__A )}
lowercase : int ={k: replace(__A , __A ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__A ) )
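The helpers above (names flattened to `lowercase_` by the style transform) match flattened parameter paths against regex rules to assign JAX `PartitionSpec`s for GPT-style model parallelism. A standalone sketch of the matching logic, using only the standard library:

```python
import re

def rule_matches(patterns, key_path):
    # True if the regex tuple matches some contiguous window of the flattened key path.
    qs = tuple(re.compile(p + "$") for p in patterns)
    for i in range(len(key_path) - len(qs) + 1):
        if all(q.match(k) for q, k in zip(qs, key_path[i:])):
            return True
    return False

# The rule ("mlp", "c_fc", "kernel") matches this flattened GPT parameter key:
print(rule_matches(("mlp", "c_fc", "kernel"),
                   ("transformer", "h", "0", "mlp", "c_fc", "kernel")))  # True
```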
| 708 |
'''simple docstring'''
def lowercase_ ( __A : int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
"""simple docstring"""
try:
lowercase : Any =int(__A )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =1
lowercase : Dict =2
while i * i <= n:
while n % i == 0:
lowercase : Optional[int] =i
n //= i
i += 1
if n > 1:
lowercase : Dict =n
return int(__A )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 8 | 0 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE = {
'Salesforce/codegen-350M-mono': 2_048,
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ = CodeGenTokenizer
def __init__( self : Any , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : str="<|endoftext|>" , UpperCAmelCase : Dict="<|endoftext|>" , UpperCAmelCase : Any="<|endoftext|>" , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Union[str, Any] , ) -> List[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
if kwargs.pop('''add_bos_token''' , UpperCAmelCase ):
lowercase : List[str] =kwargs.pop('''name_or_path''' , '''''' )
raise ValueError(
'''Currently GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
'''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
f'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
f'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
'''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
''' so that the fast tokenizer works correctly.''' )
lowercase : List[str] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase ) != add_prefix_space:
lowercase : List[Any] =getattr(UpperCAmelCase , pre_tok_state.pop('''type''' ) )
lowercase : Tuple =add_prefix_space
lowercase : Union[str, Any] =pre_tok_class(**UpperCAmelCase )
lowercase : str =add_prefix_space
def A__ ( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : str ) -> BatchEncoding:
'''simple docstring'''
lowercase : List[str] =kwargs.get('''is_split_into_words''' , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Optional[int] , *UpperCAmelCase : Any , **UpperCAmelCase : str ) -> BatchEncoding:
'''simple docstring'''
lowercase : str =kwargs.get('''is_split_into_words''' , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase : Union[str, Any] =self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , UpperCAmelCase : bool = False , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[List[str]] = None , **UpperCAmelCase : Dict , ) -> str:
'''simple docstring'''
lowercase : str =super().decode(
token_ids=UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , **UpperCAmelCase , )
if truncate_before_pattern is not None and len(UpperCAmelCase ) > 0:
lowercase : List[str] =self.truncate(UpperCAmelCase , UpperCAmelCase )
return decoded_text
def A__ ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
def find_re(UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
lowercase : Union[str, Any] =pattern.search(UpperCAmelCase , UpperCAmelCase )
return m.start() if m else -1
lowercase : str =[re.compile(UpperCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
lowercase : Any =list(re.finditer('''^print''' , UpperCAmelCase , re.MULTILINE ) )
if len(UpperCAmelCase ) > 1:
lowercase : List[str] =completion[: prints[1].start()]
lowercase : Optional[int] =list(re.finditer('''^def''' , UpperCAmelCase , re.MULTILINE ) )
if len(UpperCAmelCase ) > 1:
lowercase : Optional[Any] =completion[: defs[1].start()]
lowercase : Any =0
lowercase : int =[
pos for pos in [find_re(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) for terminal in terminals] if pos != -1
]
if len(UpperCAmelCase ) > 0:
return completion[: min(UpperCAmelCase )]
else:
return completion
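The `truncate` method above cuts a generated completion at the second top-level `print` or `def` and at the earliest user-supplied pattern, keeping only the first generated unit. A standalone demo of the same regex logic:

```python
import re

completion = "def add(a, b):\n    return a + b\n\ndef sub(a, b):\n    return a - b\n"
defs = list(re.finditer(r"^def", completion, re.MULTILINE))
if len(defs) > 1:
    completion = completion[: defs[1].start()]
print(completion)  # only the first function survives
```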
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
lowercase : str =u
for i in range(1 , __A ):
lowercase : Any =temp * (u - i)
return temp
def lowercase_ ( ) -> None:
"""simple docstring"""
lowercase : List[str] =int(input('''enter the numbers of values: ''' ) )
lowercase : list[list[float]] =[]
for _ in range(__A ):
y.append([] )
for i in range(__A ):
for j in range(__A ):
y[i].append(__A )
lowercase : List[Any] =0
print('''enter the values of parameters in a list: ''' )
lowercase : Optional[int] =list(map(__A , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(__A ):
lowercase : str =float(input() )
lowercase : int =int(input('''enter the value to interpolate: ''' ) )
lowercase : Union[str, Any] =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , __A ):
for j in range(n - i ):
lowercase : str =y[j + 1][i - 1] - y[j][i - 1]
lowercase : Any =y[0][0]
for i in range(1 , __A ):
summ += (ucal(__A , __A ) * y[0][i]) / math.factorial(__A )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
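The script above builds a forward-difference table and evaluates Newton's forward interpolation formula, but its interactive `input()` calls make it awkward to test. A self-contained sketch, assuming equally spaced x values:

```python
from math import factorial

def newton_forward(x, y, value):
    n = len(x)
    diff = [y[:]]  # row k holds the k-th forward differences
    for k in range(1, n):
        diff.append([diff[k - 1][j + 1] - diff[k - 1][j] for j in range(n - k)])
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = diff[0][0], 1.0
    for k in range(1, n):
        u_term *= u - (k - 1)
        total += u_term * diff[k][0] / factorial(k)
    return total

# Exact on polynomials: f(x) = x**2 sampled at 0..3, interpolated at 1.5.
print(newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5))  # 2.25
```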
| 8 | 0 |
def lowercase_ ( __A : int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
"""simple docstring"""
try:
lowercase : Any =int(__A )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =1
lowercase : Dict =2
while i * i <= n:
while n % i == 0:
lowercase : Optional[int] =i
n //= i
i += 1
if n > 1:
lowercase : Dict =n
return int(__A )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
# Create a dummy config file with image_processor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Dict =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
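The test class above exercises `AutoImageProcessor` resolution from `preprocessor_config.json`, legacy `config.json` fields, custom registration, and `trust_remote_code`. A minimal round-trip sketch, assuming the standard `transformers` API:

```python
import tempfile
from transformers import AutoImageProcessor, CLIPImageProcessor

processor = CLIPImageProcessor()  # default CLIP preprocessing settings
with tempfile.TemporaryDirectory() as tmp:
    processor.save_pretrained(tmp)  # writes preprocessor_config.json
    reloaded = AutoImageProcessor.from_pretrained(tmp)
print(type(reloaded).__name__)  # CLIPImageProcessor
```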
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
lowercase : str =u
for i in range(1 , __A ):
lowercase : Any =temp * (u - i)
return temp
def lowercase_ ( ) -> None:
"""simple docstring"""
lowercase : List[str] =int(input('''enter the numbers of values: ''' ) )
lowercase : list[list[float]] =[]
for _ in range(__A ):
y.append([] )
for i in range(__A ):
for j in range(__A ):
y[i].append(__A )
lowercase : List[Any] =0
print('''enter the values of parameters in a list: ''' )
lowercase : Optional[int] =list(map(__A , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(__A ):
lowercase : str =float(input() )
lowercase : int =int(input('''enter the value to interpolate: ''' ) )
lowercase : Union[str, Any] =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , __A ):
for j in range(n - i ):
lowercase : str =y[j + 1][i - 1] - y[j][i - 1]
lowercase : Any =y[0][0]
for i in range(1 , __A ):
summ += (ucal(__A , __A ) * y[0][i]) / math.factorial(__A )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 711 |
'''simple docstring'''
from __future__ import annotations
SCREAMING_SNAKE_CASE = 8.988E9 # units = N * m^2 * C^-2
def lowercase_ ( __A : float , __A : float , __A : float , __A : float ) -> dict[str, float]:
"""simple docstring"""
lowercase : Dict =abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
lowercase : Union[str, Any] =COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
lowercase : int =abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
lowercase : int =abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
lowercase : Tuple =(COULOMBS_CONSTANT * charge_product / abs(__A )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
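This is Coulomb's law, F = k |q1 q2| / r^2, solved for whichever of the four quantities is zero. Note the style transform collapsed `charge1` and `charge2` into the same identifier `chargea`, so two of the branches above test an identical condition. A quick numeric check of the force branch:

```python
COULOMBS_CONSTANT = 8.988e9  # N * m^2 * C^-2

# Two 1 C point charges held 1 m apart:
force = COULOMBS_CONSTANT * abs(1.0 * 1.0) / (1.0 ** 2)
print(force)  # 8.988e9 N
```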
| 8 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : str , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : bool = True , **UpperCAmelCase : Tuple , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Union[str, Any] =size if size is not None else {'''shortest_edge''': 224}
lowercase : List[str] =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowercase : Union[str, Any] =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : int =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase , param_name='''crop_size''' )
lowercase : List[str] =do_resize
lowercase : Dict =size
lowercase : Any =resample
lowercase : str =do_center_crop
lowercase : Optional[int] =crop_size
lowercase : List[str] =do_rescale
lowercase : List[str] =rescale_factor
lowercase : Optional[Any] =do_normalize
lowercase : str =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else OPENAI_CLIP_STD
lowercase : str =do_convert_rgb
def A__ ( self : List[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Dict , ):
'''simple docstring'''
lowercase : str =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowercase : Optional[int] =get_resize_output_image_size(UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=UpperCAmelCase )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Dict , ):
'''simple docstring'''
lowercase : Dict =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : str , ):
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[Any] , ):
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : int = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase : Dict , ):
'''simple docstring'''
lowercase : int =do_resize if do_resize is not None else self.do_resize
lowercase : Any =size if size is not None else self.size
lowercase : List[str] =get_size_dict(UpperCAmelCase , param_name='''size''' , default_to_square=UpperCAmelCase )
lowercase : Tuple =resample if resample is not None else self.resample
lowercase : Any =do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str =crop_size if crop_size is not None else self.crop_size
lowercase : Tuple =get_size_dict(UpperCAmelCase , param_name='''crop_size''' , default_to_square=UpperCAmelCase )
lowercase : Optional[int] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : Dict =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : int =image_mean if image_mean is not None else self.image_mean
lowercase : Dict =image_std if image_std is not None else self.image_std
lowercase : Any =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase : Optional[int] =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase : Dict =[convert_to_rgb(UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowercase : Union[str, Any] =[to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
lowercase : Optional[Any] =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
lowercase : Optional[int] =[self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : Dict =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Tuple =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Optional[Any] ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
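The image processor above applies the CLIP pipeline: resize to shortest edge 224, center-crop to 224x224, rescale by 1/255, then normalize with the CLIP mean/std. A usage sketch, assuming transformers' `CLIPImageProcessor` API and Pillow:

```python
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # shortest_edge=224, crop 224x224, CLIP mean/std
image = Image.fromarray(np.uint8(np.random.rand(300, 400, 3) * 255))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)
```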
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_ ( ) -> Any:
"""simple docstring"""
lowercase : int =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowercase : Union[str, Any] =bs[:]
lowercase : Tuple =0
for b in range(2**8 ):
if b not in bs:
bs.append(__A )
cs.append(2**8 + n )
n += 1
lowercase : Optional[Any] =[chr(__A ) for n in cs]
return dict(zip(__A , __A ) )
def lowercase_ ( __A : str ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[Any] =set()
lowercase : Tuple =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase : List[str] =char
return pairs
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
'''simple docstring'''
lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase : str =json.load(UpperCAmelCase )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowercase : Optional[int] =errors # how to handle errors in decoding
lowercase : Tuple =bytes_to_unicode()
lowercase : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[int] ={}
lowercase : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : List[str] =get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Optional[int] =bigram
lowercase : Union[str, Any] =[]
lowercase : Optional[Any] =0
while i < len(UpperCAmelCase ):
try:
lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Optional[int] =j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : str =new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
lowercase : Union[str, Any] =word
return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
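The tokenizer above is byte-level BPE: `bytes_to_unicode` builds a reversible byte-to-printable-character map, and `get_pairs` enumerates the adjacent symbol pairs that are candidates for the next merge. A standalone demo of the pair extraction:

```python
def get_pairs(word):
    # Adjacent symbol pairs of a word: the candidates for the next BPE merge.
    pairs = set()
    prev = word[0]
    for ch in word[1:]:
        pairs.add((prev, ch))
        prev = ch
    return pairs

print(sorted(get_pairs(tuple("hello"))))
# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]
```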
| 8 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures')
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[int] =mock.Mock()
lowercase : Dict =500
lowercase : Union[str, Any] ={}
lowercase : Optional[int] =HTTPError
lowercase : List[Any] ={}
# Download this model to make sure it's in the cache.
lowercase : str =WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase ) as mock_head:
lowercase : Any =WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
lowercase : Optional[Any] =WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def A__ ( cls : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Any =TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def A__ ( cls : List[str] ) -> Dict:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def A__ ( self : Any ) -> Any:
'''simple docstring'''
lowercase : Tuple =WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
lowercase : List[Any] =WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCAmelCase , repo_id='''test-feature-extractor''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowercase : Union[str, Any] =WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def A__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[int] =WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
lowercase : int =WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowercase : str =WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
lowercase : List[str] =CustomFeatureExtractor.from_pretrained(UpperCAmelCase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
lowercase : Dict =AutoFeatureExtractor.from_pretrained(
f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
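The staging tests above push a feature extractor to the Hub both directly and via `save_pretrained(..., push_to_hub=True)`, then reload and compare. A hedged sketch of the direct path, assuming you are logged in (`huggingface-cli login`); the repo and user names are hypothetical:

```python
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
extractor.push_to_hub("my-test-feature-extractor")  # hypothetical repo under your account
reloaded = Wav2Vec2FeatureExtractor.from_pretrained("your-username/my-test-feature-extractor")
```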
| 713 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =list(poly_a or [0] )[:]
lowercase : Dict =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : int =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : List[str] =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : Tuple =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Optional[int] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : str =self.__multiply()
def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase : Tuple =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase ) <= 1:
return dft[0]
#
lowercase : List[Any] =self.c_max_length // 2
while next_ncol > 0:
lowercase : str =[[] for i in range(UpperCAmelCase )]
lowercase : List[str] =self.root**next_ncol
# First half of next step
lowercase : Union[str, Any] =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : Any =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Tuple =new_dft
lowercase : List[Any] =next_ncol // 2
return dft[0]
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.__dft('''A''' )
lowercase : Union[str, Any] =self.__dft('''B''' )
lowercase : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Tuple =2
while next_ncol <= self.c_max_length:
lowercase : Tuple =[[] for i in range(UpperCAmelCase )]
lowercase : Tuple =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : List[str] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
        lowercase : Optional[Any] ='''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        lowercase : List[str] ='''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        lowercase : Optional[Any] ='''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 0 |
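A quick cross-check of the FFT multiplier above against numpy's direct convolution; a minimal sketch assuming only numpy is installed, with arbitrary example coefficients:

# Sketch: verify FFT-based polynomial multiplication against direct convolution.
# Coefficients are ordered lowest degree first: (2 + 3x)(1 + 4x) = 2 + 11x + 12x^2.
import numpy as np

poly_a = [2, 3]
poly_b = [1, 4]
expected = np.convolve(poly_a, poly_b)  # direct coefficient product

# Pad to a power of two >= len(a) + len(b) - 1, multiply pointwise in the
# frequency domain, then invert -- the same plan the class above implements.
n = 1 << int(np.ceil(np.log2(len(poly_a) + len(poly_b) - 1)))
fft_product = np.fft.ifft(np.fft.fft(poly_a, n) * np.fft.fft(poly_b, n))
print(np.round(fft_product.real[: len(expected)]))  # [ 2. 11. 12.]
print(expected)                                      # [ 2 11 12]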
'''simple docstring'''
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''dandelin/vilt-b32-finetuned-vqa'''
UpperCamelCase_ = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
UpperCamelCase_ = '''image_qa'''
UpperCamelCase_ = AutoProcessor
UpperCamelCase_ = AutoModelForVisualQuestionAnswering
UpperCamelCase_ = ['''image''', '''text''']
UpperCamelCase_ = ['''text''']
def __init__( self : Optional[int] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Tuple , UpperCAmelCase : "Image" , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
return self.pre_processor(UpperCAmelCase , UpperCAmelCase , return_tensors='''pt''' )
def A__ ( self : Dict , UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
return self.model(**UpperCAmelCase ).logits
def A__ ( self : Dict , UpperCAmelCase : Tuple ) -> List[str]:
'''simple docstring'''
lowercase : str =outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
| 714 |
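The tool above wraps the dandelin/vilt-b32-finetuned-vqa checkpoint; the same model can also be exercised directly through the standard pipeline API. A minimal sketch -- the image path and the printed score are illustrative placeholders:

from PIL import Image
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
image = Image.open("photo.jpg")  # hypothetical local file
print(vqa(image=image, question="How many cats are there?", top_k=1))
# e.g. [{'score': 0.98, 'answer': '2'}]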
'''simple docstring'''
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : List[Any] =str(bin(__A ) )
binary_number += "0" * shift_amount
return binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : Union[str, Any] =str(bin(__A ) )[2:]
if shift_amount >= len(__A ):
return "0b0"
lowercase : Any =binary_number[: len(__A ) - shift_amount]
return "0b" + shifted_binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
lowercase : str ='''0''' + str(bin(__A ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase : Dict =len(bin(__A )[3:] ) # Find 2's complement of number
lowercase : Optional[Any] =bin(abs(__A ) - (1 << binary_number_length) )[3:]
lowercase : int =(
'''1''' + '''0''' * (binary_number_length - len(__A )) + binary_number
)
if shift_amount >= len(__A ):
return "0b" + binary_number[0] * len(__A )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__A ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 0 |
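Python's built-in shift operators give a quick oracle for the helpers above: << and >> on non-negative ints match the logical shifts, and >> on a negative int is already arithmetic (sign-propagating). A small check:

# 17 << 2 multiplies by 4; 17 >> 2 floor-divides by 4.
assert bin(17 << 2) == "0b1000100"
assert bin(17 >> 2) == "0b100"

# Arithmetic right shift keeps the sign: -17 >> 2 == floor(-17 / 4) == -5.
assert -17 >> 2 == -5
print("all shift identities hold")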
'''simple docstring'''
from __future__ import annotations
import math
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : int ) -> None:
'''simple docstring'''
lowercase : int =size
# approximate the overall size of segment tree with given value
lowercase : int =[0 for i in range(0 , 4 * size )]
# create array to store lazy update
lowercase : Dict =[0 for i in range(0 , 4 * size )]
lowercase : Optional[Any] =[0 for i in range(0 , 4 * size )] # flag for lazy update
def A__ ( self : Any , UpperCAmelCase : int ) -> int:
'''simple docstring'''
return idx * 2
def A__ ( self : int , UpperCAmelCase : int ) -> int:
'''simple docstring'''
return idx * 2 + 1
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[int] ) -> None:
'''simple docstring'''
if left_element == right_element:
lowercase : int =a[left_element - 1]
else:
lowercase : Union[str, Any] =(left_element + right_element) // 2
self.build(self.left(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.build(self.right(UpperCAmelCase ) , mid + 1 , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =max(
self.segment_tree[self.left(UpperCAmelCase )] , self.segment_tree[self.right(UpperCAmelCase )] )
def A__ ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ) -> bool:
'''simple docstring'''
if self.flag[idx] is True:
lowercase : Any =self.lazy[idx]
lowercase : Any =False
if left_element != right_element:
lowercase : Tuple =self.lazy[idx]
lowercase : Dict =self.lazy[idx]
lowercase : Tuple =True
lowercase : int =True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
lowercase : str =val
if left_element != right_element:
lowercase : Optional[int] =val
lowercase : Any =val
lowercase : int =True
lowercase : int =True
return True
lowercase : Optional[int] =(left_element + right_element) // 2
self.update(self.left(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.update(self.right(UpperCAmelCase ) , mid + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : List[Any] =max(
self.segment_tree[self.left(UpperCAmelCase )] , self.segment_tree[self.right(UpperCAmelCase )] )
return True
def A__ ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ) -> int | float:
'''simple docstring'''
if self.flag[idx] is True:
lowercase : Union[str, Any] =self.lazy[idx]
lowercase : int =False
if left_element != right_element:
lowercase : List[str] =self.lazy[idx]
lowercase : Tuple =self.lazy[idx]
lowercase : Dict =True
lowercase : Tuple =True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
lowercase : Any =(left_element + right_element) // 2
lowercase : int =self.query(self.left(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : str =self.query(self.right(UpperCAmelCase ) , mid + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return max(UpperCAmelCase , UpperCAmelCase )
def __str__( self : Tuple ) -> str:
'''simple docstring'''
return str([self.query(1 , 1 , self.size , UpperCAmelCase , UpperCAmelCase ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
SCREAMING_SNAKE_CASE = 15
SCREAMING_SNAKE_CASE = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 715 |
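The lazy segment tree above can be validated against a brute-force list that applies the same range assignment; a sketch assuming the SegmentTree class and its build/update/query methods are in scope, used exactly as in the demo:

import random

size = 15
data = [random.randint(-20, 20) for _ in range(size)]
segt = SegmentTree(size)
segt.build(1, 1, size, data)

# Range-assign 111 to positions 4..9 (1-based, inclusive) and mirror it
# on the plain list, then compare the global maximum.
left, right, value = 4, 9, 111
segt.update(1, 1, size, left, right, value)
for i in range(left - 1, right):
    data[i] = value

assert segt.query(1, 1, size, 1, size) == max(data)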
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =pipeline(
'''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Optional[Any] =INVOICE_URL
lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
lowercase : Dict ='''What is the placebo?'''
lowercase : Optional[Any] =[
{
'''image''': load_image(UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
self.assertEqual(
UpperCAmelCase , [
[
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowercase : Union[str, Any] =INVOICE_URL
lowercase : Tuple ='''How many cats are there?'''
lowercase : Optional[int] =[
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        # No text can be detected in this image, so layoutlmv2 should fail
        # and return an empty answer.
lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Dict =[]
lowercase : str =[]
lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowercase : Dict =INVOICE_URL
lowercase : str ='''What is the invoice number?'''
lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowercase : Dict =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Any =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : str ) -> Dict:
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : Tuple =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
lowercase : Tuple =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : str =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : Dict =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : List[Any] =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
lowercase : str =INVOICE_URL
lowercase : int ='''What is the invoice number?'''
lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Union[str, Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowercase : Any =INVOICE_URL
lowercase : Union[str, Any] ='''What is the invoice number?'''
lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
pass
| 8 | 0 |
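Outside the test harness, the same task is a single pipeline call; a sketch using a checkpoint the slow tests above exercise (requires pytesseract for the OCR path; the printed score is illustrative):

from transformers import pipeline

INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))
# e.g. [{'score': 0.42, 'answer': 'us-001', 'start': 16, 'end': 16}]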
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def A__ ( self : Tuple , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : List[Any] =[label.strip() for label in labels.split(''',''' ) if label.strip()]
return labels
def __call__( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
'''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
).format(UpperCAmelCase ) )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : int =[sequences]
lowercase : Tuple =[]
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(UpperCAmelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__A )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Optional[int]=ZeroShotClassificationArgumentHandler() , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =args_parser
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def A__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def A__ ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : str=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[int]=TruncationStrategy.ONLY_FIRST , **UpperCAmelCase : str ) -> int:
'''simple docstring'''
lowercase : int =self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
            logger.error(
                '''Tokenizer does not support padding, which is necessary for zero-shot; '''
                '''attempting to use `pad_token=eos_token`''' )
lowercase : Optional[int] =self.tokenizer.eos_token
try:
lowercase : int =self.tokenizer(
UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , )
except Exception as e:
if "too short" in str(UpperCAmelCase ):
                # The tokenizer may complain that the requested truncation length
                # exceeds the input length; in that case, skip truncation.
                # There is no cleaner way to catch this exception.
lowercase : List[str] =self.tokenizer(
UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , padding=UpperCAmelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def A__ ( self : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if kwargs.get('''multi_class''' , UpperCAmelCase ) is not None:
lowercase : Dict =kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
lowercase : str ={}
if "candidate_labels" in kwargs:
lowercase : List[Any] =self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
lowercase : str =kwargs['''hypothesis_template''']
lowercase : Tuple ={}
if "multi_label" in kwargs:
lowercase : Any =kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[int] , UpperCAmelCase : Union[str, List[str]] , *UpperCAmelCase : Dict , **UpperCAmelCase : Tuple , ) -> Optional[int]:
'''simple docstring'''
if len(UpperCAmelCase ) == 0:
pass
elif len(UpperCAmelCase ) == 1 and "candidate_labels" not in kwargs:
lowercase : Optional[Any] =args[0]
else:
raise ValueError(f'Unable to understand extra arguments {args}' )
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]="This example is {}." ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] =self._args_parser(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(UpperCAmelCase , UpperCAmelCase ) ):
lowercase : Tuple =self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(UpperCAmelCase ) - 1,
**model_input,
}
def A__ ( self : int , UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[int] =inputs['''candidate_label''']
lowercase : Union[str, Any] =inputs['''sequence''']
lowercase : Union[str, Any] ={k: inputs[k] for k in self.tokenizer.model_input_names}
lowercase : List[str] =self.model(**UpperCAmelCase )
lowercase : List[str] ={
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=False ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =[outputs['''candidate_label'''] for outputs in model_outputs]
lowercase : int =[outputs['''sequence'''] for outputs in model_outputs]
lowercase : Tuple =np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
lowercase : Union[str, Any] =logits.shape[0]
lowercase : Tuple =len(UpperCAmelCase )
lowercase : Optional[Any] =N // n
lowercase : int =logits.reshape((num_sequences, n, -1) )
if multi_label or len(UpperCAmelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
lowercase : Union[str, Any] =self.entailment_id
lowercase : Tuple =-1 if entailment_id == 0 else 0
lowercase : Optional[int] =reshaped_outputs[..., [contradiction_id, entailment_id]]
lowercase : int =np.exp(UpperCAmelCase ) / np.exp(UpperCAmelCase ).sum(-1 , keepdims=UpperCAmelCase )
lowercase : Union[str, Any] =scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
lowercase : Union[str, Any] =reshaped_outputs[..., self.entailment_id]
lowercase : List[str] =np.exp(UpperCAmelCase ) / np.exp(UpperCAmelCase ).sum(-1 , keepdims=UpperCAmelCase )
lowercase : List[Any] =list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 716 |
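End to end, the pipeline above is driven like this; a sketch with facebook/bart-large-mnli, the usual NLI checkpoint for zero-shot classification (the example sentence and labels are arbitrary):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
out = classifier(
    "The new GPU cuts training time in half.",
    candidate_labels=["hardware", "cooking", "politics"],
    hypothesis_template="This example is {}.",
    multi_label=False,
)
print(out["labels"][0])  # expected top label: 'hardware'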
'''simple docstring'''
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__A ) , __A )
return number - int(__A )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 | 0 |
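The standard library offers the same integer/fraction split via math.modf, which returns both parts with the input's sign; a sketch of the equivalent behaviour:

import math

frac, whole = math.modf(1.53)
print(round(frac, 2), whole)    # 0.53 1.0
frac, whole = math.modf(-14.789)
print(round(frac, 3), whole)    # -0.789 -14.0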
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def lowercase_ ( __A : str , __A : Any ) -> Union[str, Any]:
"""simple docstring"""
if os.path.exists(__A ):
if os.path.exists(os.path.join(__A , '''config.json''' ) ) and os.path.isfile(
os.path.join(__A , '''config.json''' ) ):
os.remove(os.path.join(__A , '''config.json''' ) )
if os.path.exists(os.path.join(__A , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(__A , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(__A , '''pytorch_model.bin''' ) )
else:
os.makedirs(__A )
model.save_pretrained(__A )
def lowercase_ ( __A : int , __A : Any=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] =2
if unlogit:
lowercase : str =torch.pow(__A , __A )
lowercase : str =p * torch.log(__A )
lowercase : List[str] =0
return -plogp.sum(dim=-1 )
def lowercase_ ( __A : List[str] ) -> int:
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(F'{x + 1}' for x in range(len(__A ) ) ) )
for row in range(len(__A ) ):
if tensor.dtype != torch.long:
logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:d}' for x in tensor[row].cpu().data ) )
def lowercase_ ( __A : Tuple , __A : Union[str, Any] , __A : List[str] , __A : Optional[Any]=True , __A : int=True , __A : List[Any]=None , __A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : Dict =model.config.num_hidden_layers, model.config.num_attention_heads
lowercase : Optional[int] =torch.zeros(__A , __A ).to(args.device )
lowercase : Dict =torch.zeros(__A , __A ).to(args.device )
if head_mask is None:
lowercase : List[Any] =torch.ones(__A , __A ).to(args.device )
head_mask.requires_grad_(requires_grad=__A )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
lowercase : int =None
lowercase : str =0.0
lowercase : Dict =0.0
for step, inputs in enumerate(tqdm(__A , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
lowercase : Dict =tuple(t.to(args.device ) for t in inputs )
(lowercase ) : int =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowercase : Union[str, Any] =model(__A , labels=__A , head_mask=__A )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowercase : int =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__A ):
lowercase : Any =entropy(attn.detach() , __A )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__A ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowercase : Tuple =2
lowercase : Any =torch.pow(torch.pow(__A , __A ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
lowercase : List[str] =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(__A )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(__A )
logger.info('''Head ranked by importance scores''' )
lowercase : Any =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
lowercase : Union[str, Any] =torch.arange(
head_importance.numel() , device=args.device )
lowercase : int =head_ranks.view_as(__A )
print_ad_tensor(__A )
return attn_entropy, head_importance, total_loss
def lowercase_ ( __A : Any , __A : Dict , __A : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase : List[Any] =compute_heads_importance(__A , __A , __A , compute_entropy=__A )
    lowercase : Tuple =1 / loss  # instead of downstream score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , __A , original_score * args.masking_threshold )
lowercase : Tuple =torch.ones_like(__A )
lowercase : str =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
lowercase : Any =original_score
while current_score >= original_score * args.masking_threshold:
lowercase : Optional[int] =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
lowercase : Dict =float('''Inf''' )
lowercase : List[Any] =head_importance.view(-1 ).sort()[1]
if len(__A ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
lowercase : List[Any] =current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
lowercase : Optional[int] =new_head_mask.view(-1 )
lowercase : Optional[Any] =0.0
lowercase : Dict =new_head_mask.view_as(__A )
lowercase : Any =new_head_mask.clone().detach()
print_ad_tensor(__A )
# Compute metric and head importance again
lowercase : List[Any] =compute_heads_importance(
__A , __A , __A , compute_entropy=__A , head_mask=__A )
lowercase : Optional[int] =1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('''Final head mask''' )
print_ad_tensor(__A )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowercase_ ( __A : Dict , __A : int , __A : Dict , __A : int ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] =datetime.now()
lowercase : int =compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A )
lowercase : Tuple =1 / loss
lowercase : Union[str, Any] =datetime.now() - before_time
lowercase : Dict =sum(p.numel() for p in model.parameters() )
lowercase : Tuple ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__A ) )
}
for k, v in heads_to_prune.items():
if isinstance(__A , __A ):
lowercase : Optional[int] =[
v,
]
assert sum(len(__A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__A )
lowercase : str =sum(p.numel() for p in model.parameters() )
lowercase : Optional[Any] =datetime.now()
lowercase : List[str] =compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A , actually_pruned=__A , )
lowercase : Any =1 / loss
lowercase : List[str] =datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __A , __A , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __A , __A )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 1_0_0 )
save_model(__A , args.output_dir )
def lowercase_ ( ) -> List[str]:
"""simple docstring"""
lowercase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__A , type=__A , required=__A , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__A , type=__A , required=__A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=__A , type=__A , required=__A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=__A , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=__A , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=__A , type=__A , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=__A , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=__A , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=__A , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=__A , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=__A , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=__A , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=__A , default=4_2 )
parser.add_argument('''--local_rank''' , type=__A , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=__A , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=__A , default='''''' , help='''Can be used for distant debugging.''' )
lowercase : str =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__A )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowercase : Optional[Any] =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
lowercase : Dict =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowercase : Dict =torch.device('''cuda''' , args.local_rank )
lowercase : Dict =1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    lowercase : int =GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowercase : Union[str, Any] =nn.parallel.DistributedDataParallel(
__A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__A )
elif args.n_gpu > 1:
lowercase : Any =nn.DataParallel(__A )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__A )
torch.save(__A , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , __A )
# Prepare dataset
lowercase : Union[str, Any] =np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
lowercase : Dict =(torch.from_numpy(__A ),)
lowercase : Dict =TensorDataset(*__A )
lowercase : Optional[Any] =RandomSampler(__A )
lowercase : List[Any] =DataLoader(__A , sampler=__A , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__A , __A , __A )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowercase : Tuple =mask_heads(__A , __A , __A )
prune_heads(__A , __A , __A , __A )
if __name__ == "__main__":
main()
| 717 |
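The entropy helper above implements -sum(p * log p) over attention rows; a standalone sketch (the function name and mask handling are restated here, not imported) showing the two extremes, uniform versus one-hot attention:

import torch

def attention_entropy(p):
    # -sum(p * log p) over the last dim; zero-probability terms contribute 0
    # (0 * log 0 would otherwise produce NaN).
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)

uniform = torch.full((4,), 0.25)
peaked = torch.tensor([1.0, 0.0, 0.0, 0.0])
print(attention_entropy(uniform))  # log(4) ~= 1.3863: maximally spread attention
print(attention_entropy(peaked))   # 0.0: all attention on one token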
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : List[Any] , __A : int , __A : int ) -> Optional[int]:
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def lowercase_ ( __A : np.ndarray , __A : Optional[str] , __A : Optional[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase : int =to_pil_image(__A )
lowercase , lowercase : Tuple =pil_image.size
lowercase : Optional[Any] =pytesseract.image_to_data(__A , lang=__A , output_type='''dict''' , config=__A )
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[Any] =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowercase : Dict =[idx for idx, word in enumerate(__A ) if not word.strip()]
lowercase : str =[word for idx, word in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : Optional[int] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : List[Any] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : str =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : int =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase : Tuple =[]
for x, y, w, h in zip(__A , __A , __A , __A ):
lowercase : str =[x, y, x + w, y + h]
actual_boxes.append(__A )
# finally, normalize the bounding boxes
lowercase : List[str] =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(__A , __A , __A ) )
assert len(__A ) == len(__A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
lowercase : Optional[Any] =do_resize
lowercase : List[Any] =size
lowercase : List[str] =resample
lowercase : Dict =do_rescale
lowercase : str =rescale_value
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase : List[Any] =apply_ocr
lowercase : Union[str, Any] =ocr_lang
lowercase : str =tesseract_config
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase : Optional[Any] =(size['''height'''], size['''width'''])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowercase : Tuple =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
lowercase : List[str] =resample if resample is not None else self.resample
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : str =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : int =[]
lowercase : Tuple =[]
for image in images:
lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
if apply_ocr:
lowercase : int =words_batch
lowercase : List[str] =boxes_batch
return data
| 8 | 0 |
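The box normalization above maps pixel coordinates into LayoutLM's fixed 0-1000 space so boxes are independent of the source image resolution; restated as a standalone sketch with a worked example:

def normalize_box(box, width, height):
    # (left, top, right, bottom) in pixels -> integers on a 0-1000 grid.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

# A 200x100 px box at (50, 25) on a 1000x500 px page:
print(normalize_box([50, 25, 250, 125], 1000, 500))  # [50, 50, 250, 250]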
'''simple docstring'''
def lowercase_ ( __A : Optional[Any] ) -> int:
"""simple docstring"""
lowercase : Optional[int] =[0] * len(__A )
lowercase : List[str] =[]
lowercase : List[str] =[1] * len(__A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__A ) ):
if indegree[i] == 0:
queue.append(__A )
while queue:
lowercase : List[Any] =queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowercase : Dict =long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__A )
print(max(__A ) )
# Adjacency list of Graph
SCREAMING_SNAKE_CASE = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 718 |
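A memoized DFS gives an independent oracle for the Kahn's-algorithm result above; a sketch on the same adjacency list, counting vertices on the longest path as the code above does:

from functools import lru_cache

graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}

@lru_cache(maxsize=None)
def longest_edges_from(v):
    # Number of edges on the longest path starting at v.
    return max((1 + longest_edges_from(u) for u in graph[v]), default=0)

print(1 + max(longest_edges_from(v) for v in graph))  # 5, matching longest_distance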
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Any =13
lowercase : Any =7
lowercase : Optional[int] =True
lowercase : Optional[int] =True
lowercase : Tuple =False
lowercase : Optional[Any] =True
lowercase : Dict =99
lowercase : Union[str, Any] =32
lowercase : Union[str, Any] =2
lowercase : Union[str, Any] =4
lowercase : List[str] =37
lowercase : str ='''gelu'''
lowercase : Dict =0.1
lowercase : List[Any] =0.1
lowercase : List[str] =512
lowercase : Optional[int] =16
lowercase : Optional[Any] =2
lowercase : List[str] =0.0_2
lowercase : Any =3
lowercase : Optional[Any] =4
lowercase : int =None
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Any =None
lowercase : str =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : int =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : str =TFDistilBertModelTester(self )
lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[Any] =model(UpperCAmelCase )[0]
lowercase : str =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
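        # reference logits for the first 3 tokens x 3 hidden dims, presumably recorded from a known-good run of the checkpoint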
lowercase : Optional[int] =tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
| 8 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
SCREAMING_SNAKE_CASE = False
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : str , UpperCAmelCase : Optional[Any]=32 ) -> List[str]:
'''simple docstring'''
set_seed(0 )
lowercase : str =UNetaDModel(sample_size=UpperCAmelCase , in_channels=3 , out_channels=3 )
lowercase : int =torch.optim.SGD(model.parameters() , lr=0.0_0_0_1 )
return model, optimizer
@slow
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : int ='''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowercase : Tuple =DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='''linear''' , clip_sample=UpperCAmelCase , )
lowercase : Union[str, Any] =DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='''linear''' , clip_sample=UpperCAmelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowercase : Any =[torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCAmelCase ) for _ in range(4 )]
lowercase : Tuple =[torch.randn((4, 3, 32, 32) ).to(UpperCAmelCase ) for _ in range(4 )]
lowercase : str =[torch.randint(0 , 1000 , (4,) ).long().to(UpperCAmelCase ) for _ in range(4 )]
# train with a DDPM scheduler
lowercase : List[str] =self.get_model_optimizer(resolution=32 )
model.train().to(UpperCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase : Any =ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase : List[Any] =model(UpperCAmelCase , timesteps[i] ).sample
lowercase : Any =torch.nn.functional.mse_loss(UpperCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowercase : Union[str, Any] =self.get_model_optimizer(resolution=32 )
model.train().to(UpperCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase : Dict =ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase : Any =model(UpperCAmelCase , timesteps[i] ).sample
lowercase : Optional[Any] =torch.nn.functional.mse_loss(UpperCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
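        # NOTE: the upstream test keeps the noisy images and noise predictions from both training loops; the allclose checks below assume those tensors were captured, asserting DDPM and DDIM produced identical values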
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
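# Each optional backend below (sentencepiece, tokenizers, torch, TensorFlow) is probed with a
# try/except OptionalDependencyNotAvailable gate so only importable objects are advertised.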
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
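    # NOTE: upstream, the blocks above populate a single `_import_structure` dict that is
    # handed to `_LazyModule` below; the flattened assignments here assume that binding exists.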
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def lowercase_ ( ):
    """simple docstring"""
    from torch.utils.cpp_extension import load

    # resolve the kernel sources relative to this file, as in the upstream loader
    root =Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files =[
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ] , )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
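# Usage sketch (assumes a working CUDA toolchain; the extension is compiled on the first call):
#   MSDA = lowercase_()
#   # MSDA then exposes the kernels defined in the C++/CUDA sources listed above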
| 720 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ) -> bool:
    """simple docstring"""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation =STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version =parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation : str , version : str ) -> bool:
    """simple docstring"""
    return compare_versions(SCREAMING_SNAKE_CASE , operation , version )
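# Example (illustrative): gate a code path on the installed torch version.
#   if is_torch_version(">=", "1.12.0"):
#       ...  # safe to rely on newer torch APIs here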
| 8 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =0
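        # upstream this setUp assigns transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 so remote-code confirmation prompts never block the suite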
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
            # Create a dummy config file with image_processor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Dict =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 721 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
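# base search URL: the query string pins the role, and the location is appended at call time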
def fetch_jobs( location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE + location ).content , '''html.parser''' )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
        job_title =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
        company_name =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
SCREAMING_SNAKE_CASE = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
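# ANSI cursor-movement suffixes: ESC[{n}A moves up n lines, B down, C forward, D back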
class UpperCAmelCase_ ( enum.Enum ):
"""simple docstring"""
UpperCamelCase_ = 0
UpperCamelCase_ = 1
def forceWrite( content , end="" ):
    """simple docstring"""
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor( content , color , end="" ):
    """simple docstring"""
    forceWrite(F'\u001b[{color}m{content}\u001b[0m' , end )
def reset_cursor():
    """simple docstring"""
    forceWrite('''\r''' )
def move_cursor( num_lines : int , direction : str ):
    """simple docstring"""
    forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def clear_line():
    """simple docstring"""
    forceWrite(''' ''' * TERMINAL_WIDTH )
    reset_cursor()
def linebreak():
    """simple docstring"""
    reset_cursor()
    forceWrite('''-''' * TERMINAL_WIDTH )
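# Usage sketch of the helpers above:
#   forceWrite("working", end="\n")
#   writeColor("done", 32)  # 32 is the ANSI colour code for green
#   move_cursor(1, "UP"); clear_line(); linebreak()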
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''imagegpt'''
UpperCamelCase_ = ['''past_key_values''']
UpperCamelCase_ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[str] , UpperCAmelCase : Union[str, Any]=512 + 1 , UpperCAmelCase : str=32 * 32 , UpperCAmelCase : Union[str, Any]=512 , UpperCAmelCase : Optional[int]=24 , UpperCAmelCase : List[str]=8 , UpperCAmelCase : Dict=None , UpperCAmelCase : Dict="quick_gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Union[str, Any]=1e-5 , UpperCAmelCase : Tuple=0.0_2 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=False , UpperCAmelCase : Any=False , UpperCAmelCase : Any=False , **UpperCAmelCase : int , ) -> Any:
'''simple docstring'''
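        # defaults: vocab_size = 512 + 1 (512 colour-cluster tokens plus a start-of-sequence token), n_positions = 32 * 32 (one slot per pixel of a 32x32 image)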
lowercase : Optional[Any] =vocab_size
lowercase : str =n_positions
lowercase : Dict =n_embd
lowercase : Optional[Any] =n_layer
lowercase : Union[str, Any] =n_head
lowercase : List[str] =n_inner
lowercase : Dict =activation_function
lowercase : List[Any] =resid_pdrop
lowercase : Optional[Any] =embd_pdrop
lowercase : Optional[Any] =attn_pdrop
lowercase : Any =layer_norm_epsilon
lowercase : List[str] =initializer_range
lowercase : int =scale_attn_weights
lowercase : Optional[int] =use_cache
lowercase : Optional[Any] =scale_attn_by_inverse_layer_idx
lowercase : List[str] =reorder_and_upcast_attn
lowercase : Optional[int] =tie_word_embeddings
super().__init__(tie_word_embeddings=UpperCAmelCase , **UpperCAmelCase )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
@property
def A__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def A__ ( self : Union[str, Any] , UpperCAmelCase : "FeatureExtractionMixin" , UpperCAmelCase : int = 1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional["TensorType"] = None , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 32 , UpperCAmelCase : int = 32 , ) -> Mapping[str, Any]:
'''simple docstring'''
lowercase : Union[str, Any] =self._generate_dummy_images(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : List[Any] =dict(preprocessor(images=UpperCAmelCase , return_tensors=UpperCAmelCase ) )
return inputs
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : str , __A : str ) -> List[str]:
"""simple docstring"""
lowercase : List[Any] =RobertaPreLayerNormConfig.from_pretrained(
__A , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
# convert state_dict
lowercase : List[str] =torch.load(hf_hub_download(repo_id=__A , filename='''pytorch_model.bin''' ) )
lowercase : int ={}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('''roberta.''' ):
lowercase : List[Any] ='''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
continue
lowercase : str =tensor_value
lowercase : List[Any] =RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__A , config=__A , state_dict=__A )
model.save_pretrained(__A )
# convert tokenizer
lowercase : List[str] =AutoTokenizer.from_pretrained(__A )
tokenizer.save_pretrained(__A )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
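# Example invocation (script name and output path illustrative; the repo id comes from the help text above):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-dump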
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
SCREAMING_SNAKE_CASE = {'allegro/herbert-base-cased': 514}
SCREAMING_SNAKE_CASE = {}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = HerbertTokenizer
def __init__( self : Dict , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : List[Any]="<unk>" , UpperCAmelCase : str="<pad>" , UpperCAmelCase : Optional[Any]="<mask>" , UpperCAmelCase : List[str]="</s>" , **UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sep_token=UpperCAmelCase , **UpperCAmelCase , )
def A__ ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
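        # single sequence: <s> A </s> ; sequence pair: <s> A </s> B </s>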
lowercase : List[Any] =[self.cls_token_id]
lowercase : Any =[self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Optional[Any] =[self.sep_token_id]
lowercase : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase : List[Any] =self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 8 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Any =inspect.getfile(accelerate.test_utils )
lowercase : Any =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
lowercase : Union[str, Any] =os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
lowercase : Union[str, Any] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
print(f'Found {torch.cuda.device_count()} devices.' )
lowercase : int =['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def A__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
print(f'Found {torch.cuda.device_count()} devices.' )
lowercase : List[str] =['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(f'Command: {cmd}' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : Optional[int] =['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def A__ ( self : str ) -> List[Any]:
'''simple docstring'''
print(f'Found {torch.cuda.device_count()} devices, using 2 devices only' )
lowercase : List[str] =['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE = (accelerator.state.process_index + 2, 10)
SCREAMING_SNAKE_CASE = torch.randint(0, 10, shape).to(accelerator.device)
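    # each rank builds a tensor with a different first dimension (process_index + 2); padding should grow every rank to num_processes + 1 rows, filling the gap with zeros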
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
SCREAMING_SNAKE_CASE = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
SCREAMING_SNAKE_CASE = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 703 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
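        # monkeypatching socket.socket makes any network access in the child process raise, emulating a fully offline machine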
# Force fetching the files so that we can use the cache
lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : Tuple =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Optional[Any] ='''1'''
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : str =self.get_env()
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Any ='''1'''
lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] ='''
from transformers import pipeline
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase : Tuple =self.get_env()
lowercase : Optional[int] ='''1'''
lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def A__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] ='''
from transformers import AutoModel
'''
lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : List[str] ='''1'''
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 8 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : str =1
lowercase : Any =3
lowercase : Dict =(32, 32)
lowercase : str =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def A__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Any =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
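        # in_channels=7: the upscaler UNet concatenates the 4 latent channels with the 3-channel low-resolution conditioning image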
return model
@property
def A__ ( self : List[str] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Dict =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def A__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase : List[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(UpperCAmelCase )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : List[str] =self.dummy_cond_unet_upscale
lowercase : Optional[int] =DDPMScheduler()
lowercase : Tuple =DDIMScheduler(prediction_type='''v_prediction''' )
lowercase : Optional[int] =self.dummy_vae
lowercase : Any =self.dummy_text_encoder
lowercase : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : Any =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase : int =Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowercase : List[str] =StableDiffusionUpscalePipeline(
unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , max_noise_level=350 , )
lowercase : List[Any] =sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase : Optional[Any] ='''A painting of a squirrel eating a burger'''
lowercase : str =torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase : Dict =sd_pipe(
[prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowercase : int =output.images
lowercase : Union[str, Any] =torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase : Union[str, Any] =sd_pipe(
[prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCAmelCase , )[0]
lowercase : int =image[0, -3:, -3:, -1]
lowercase : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
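        # the x4 upscaler multiplies each spatial edge of the low-resolution input by 4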
lowercase : Union[str, Any] =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowercase : Optional[int] =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self : Optional[int] ) -> int:
'''simple docstring'''
lowercase : List[Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : List[str] =self.dummy_cond_unet_upscale
lowercase : Optional[Any] =DDPMScheduler()
lowercase : int =DDIMScheduler(prediction_type='''v_prediction''' )
lowercase : int =self.dummy_vae
lowercase : Optional[int] =self.dummy_text_encoder
lowercase : List[Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : str =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase : Dict =Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowercase : Union[str, Any] =StableDiffusionUpscalePipeline(
unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , max_noise_level=350 , )
lowercase : str =sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase : Optional[Any] ='''A painting of a squirrel eating a burger'''
lowercase : List[str] =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowercase : Optional[Any] =output.images
assert image.shape[0] == 2
lowercase : List[str] =torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase : Tuple =sd_pipe(
[prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowercase : List[Any] =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def A__ ( self : Any ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =self.dummy_cond_unet_upscale
lowercase : List[str] =DDPMScheduler()
lowercase : Union[str, Any] =DDIMScheduler(prediction_type='''v_prediction''' )
lowercase : List[str] =self.dummy_vae
lowercase : Optional[Any] =self.dummy_text_encoder
lowercase : Any =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : List[Any] =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase : Optional[int] =Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowercase : Union[str, Any] =unet.half()
lowercase : Union[str, Any] =text_encoder.half()
# make sure here that pndm scheduler skips prk
lowercase : Dict =StableDiffusionUpscalePipeline(
unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , max_noise_level=350 , )
lowercase : int =sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase : List[Any] ='''A painting of a squirrel eating a burger'''
lowercase : List[str] =torch.manual_seed(0 )
lowercase : Union[str, Any] =sd_pipe(
[prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , ).images
lowercase : List[str] =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : Any ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowercase : List[str] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
lowercase : List[str] ='''stabilityai/stable-diffusion-x4-upscaler'''
lowercase : Tuple =StableDiffusionUpscalePipeline.from_pretrained(UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase : List[str] ='''a cat sitting on a park bench'''
lowercase : Any =torch.manual_seed(0 )
lowercase : Dict =pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , output_type='''np''' , )
lowercase : List[Any] =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def A__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowercase : Any =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
lowercase : Optional[Any] ='''stabilityai/stable-diffusion-x4-upscaler'''
lowercase : Union[str, Any] =StableDiffusionUpscalePipeline.from_pretrained(
UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase : Union[str, Any] ='''a cat sitting on a park bench'''
lowercase : Any =torch.manual_seed(0 )
lowercase : Optional[Any] =pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , output_type='''np''' , )
lowercase : Any =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def A__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : List[Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowercase : Optional[int] ='''stabilityai/stable-diffusion-x4-upscaler'''
lowercase : Union[str, Any] =StableDiffusionUpscalePipeline.from_pretrained(
UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase : Any ='''a cat sitting on a park bench'''
lowercase : Union[str, Any] =torch.manual_seed(0 )
lowercase : Tuple =pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=5 , output_type='''np''' , )
lowercase : Any =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 704 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation : str ) -> int:
    """simple docstring"""
    operators ={'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack : Stack[int] =Stack()
    operator_stack : Stack[str] =Stack()
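    # Dijkstra's rules for a fully parenthesised expression:
    #   RULE 1: push operands onto the operand stack
    #   RULE 2: push operators onto the operator stack
    #   RULE 3: ignore left parentheses
    #   RULE 4: on ")", pop one operator and two operands, apply, push the result
    #   RULE 5: when the input is exhausted, the operand stack holds the answer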
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr =operator_stack.peek()
            operator_stack.pop()
            num_a =operand_stack.peek()
            operand_stack.pop()
            num_b =operand_stack.peek()
            operand_stack.pop()
            total =operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 | 0 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version( config_name : str , save_dir : str , **config_kwargs ):
    """simple docstring"""
    cfg =AutoConfig.from_pretrained(config_name , **config_kwargs )
    model =AutoModelForSeq2SeqLM.from_config(cfg )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
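# Example invocation via python-fire (script name illustrative):
#   python save_randomly_initialized_model.py t5-small ./t5-small-random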
| 705 |
'''simple docstring'''
import re
def indian_phone_validator( phone : str ) -> bool:
    """simple docstring"""
    pat =re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
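    # pattern: optional "+91" (with optional space or hyphen), an optional leading 0 or "91",
    # then a ten-digit subscriber number starting with 7, 8 or 9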
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 8 | 0 |
'''simple docstring'''
from manim import *
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def A__ ( self : List[Any] ) -> int:
'''simple docstring'''
lowercase : Tuple =Rectangle(height=0.5 , width=0.5 )
lowercase : str =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowercase : Any =Rectangle(height=0.2_5 , width=0.2_5 )
lowercase : Tuple =[mem.copy() for i in range(6 )]
lowercase : Tuple =[mem.copy() for i in range(6 )]
lowercase : Optional[int] =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : List[str] =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : Dict =VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : Any =Text('''CPU''' , font_size=24 )
lowercase : Tuple =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
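        # the tiled rectangles act as memory cells; this first labelled group represents the CPU, where the weights start out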
lowercase : Tuple =[mem.copy() for i in range(4 )]
lowercase : Dict =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : List[Any] =Text('''GPU''' , font_size=24 )
lowercase : Union[str, Any] =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase )
lowercase : List[str] =[mem.copy() for i in range(6 )]
lowercase : str =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : Any =Text('''Model''' , font_size=24 )
lowercase : Dict =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase )
lowercase : List[str] =[]
lowercase : int =[]
for i, rect in enumerate(UpperCAmelCase ):
lowercase : List[Any] =fill.copy().set_fill(UpperCAmelCase , opacity=0.8 )
target.move_to(UpperCAmelCase )
model_arr.append(UpperCAmelCase )
lowercase : Union[str, Any] =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase )
self.add(*UpperCAmelCase , *UpperCAmelCase )
lowercase : List[Any] =[meta_mem.copy() for i in range(6 )]
lowercase : Any =[meta_mem.copy() for i in range(6 )]
lowercase : Optional[int] =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : str =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : Optional[Any] =VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : List[str] =Text('''Disk''' , font_size=24 )
lowercase : Optional[int] =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
disk.move_to([-4, -1.2_5, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
lowercase : List[str] =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase : List[str] =MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
lowercase : str =MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase )
lowercase : List[Any] =MarkupText(
f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase ) )
lowercase : Optional[int] =Square(0.3 )
input.set_fill(UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase , buff=0.5 )
self.play(Write(UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase , buff=0.0_2 )
self.play(MoveToTarget(UpperCAmelCase ) )
self.play(FadeOut(UpperCAmelCase ) )
lowercase : str =Arrow(start=UpperCAmelCase , end=UpperCAmelCase , color=UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowercase : Optional[int] =MarkupText(
f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) )
lowercase : int ={'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.0_2}
self.play(
Write(UpperCAmelCase ) , Circumscribe(model_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowercase : Union[str, Any] =a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
lowercase : Union[str, Any] =AnimationGroup(
FadeOut(UpperCAmelCase , run_time=0.5 ) , MoveToTarget(UpperCAmelCase , run_time=0.5 ) , FadeIn(UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowercase : str =0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowercase : Tuple =a_c
lowercase : Union[str, Any] =a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase ) , FadeOut(UpperCAmelCase , run_time=0.5 ) , )
lowercase : Tuple =MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) , MoveToTarget(UpperCAmelCase ) )
self.wait()
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = RoFormerConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
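    # The sinusoidal table follows the standard transformer convention: position pos and channel i
    # map to sin(pos / 10000^(2i/d)) in the first half of the embedding and cos(...) in the second
    # half, which is why the row for position 1 below starts with sin(1) = 0.8415 and cos(1) = 0.5403.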
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
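    # Rotary position embeddings rotate each (even, odd) feature pair by a position-dependent
    # angle: q' = q * cos + rotate_half(q) * sin, and likewise for the keys, so relative offsets
    # between positions show up as phase differences in the attention dot products.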
    def test_apply_rotary_position_embeddings(self):
        # shape (2, 12, 16, 64)
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)
        expected_query_layer = tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
        expected_key_layer = tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
| 8 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output of the text model that also carries a projection of the hidden states."""
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """simple docstring"""
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """simple docstring"""
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
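    # When `has_pre_transformation` is set, the projection is taken from the penultimate hidden
    # state (after an extra LayerNorm) instead of the final one; see the forward pass below.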
    def forward(self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict)
        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        """simple docstring"""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
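    # In case of overflowing tokens, every extra chunk produced by the tokenizer has to be paired
    # with the image of the sample it originated from, which is what the helper below does.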
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """simple docstring"""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "image"]
@property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
@property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 8 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """simple docstring"""
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        """simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts), f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        """simple docstring"""
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1])))
            if len(nbest_spans_predictions) >= num_spans:
                break
return nbest_spans_predictions[:num_spans]
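    # Span scoring: each candidate answer span is ranked by start_logit + end_logit, its length is
    # capped at `max_answer_length`, and overlapping spans are discarded so the top spans are disjoint.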
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DPRReaderTokenizer
| 708 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
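# For the default input, 600851475143 = 71 * 839 * 1471 * 6857, so the expected answer is 6857.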
if __name__ == "__main__":
print(f"""{solution() = }""")
| 8 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Map timm parameter names onto the HF ViT parameter names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
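# Note: timm stores the attention projections as a single fused qkv matrix of shape
# (3 * hidden_size, hidden_size); the helper below slices it into separate query, key and
# value weights at the offsets 0, hidden_size and 2 * hidden_size.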
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate q/k/v entries in state_dict."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification head, which has no counterpart in the base ViTModel."""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights into our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny'''):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('''small'''):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('''small'''):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('''base'''):
            pass
        elif vit_name[4:].startswith('''large'''):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('''huge'''):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {vit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1) used by the forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
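# Newton's forward-difference interpolation evaluates
#   f(x) ~= y0 + u*Dy0 + u(u - 1)/2! * D^2 y0 + ...   with u = (x - x0) / h,
# where ucal(u, i) supplies the u(u - 1)...(u - i + 1) factor of the i-th term.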
def main() -> None:
    """Read sample points from stdin and interpolate a value with Newton's forward differences."""
    n = int(input('''enter the numbers of values: '''))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print('''enter the values of parameters in a list: ''')
    x = list(map(float, input().split()))
    print('''enter the values of corresponding parameters: ''')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('''enter the value to interpolate: '''))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}')
if __name__ == "__main__":
main()
| 8 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1) -> None:
        """simple docstring"""
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU())
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
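    # The helper below builds a (batch, query_len, key_len) cross-attention mask from two 1-D
    # masks via an outer product, then adds a broadcastable head dimension.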
    def encoder_decoder_mask(self, query_input, key_input):
        """simple docstring"""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
return mask.unsqueeze(-3 )
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """simple docstring"""
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length))
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask)[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : int=1e-6 ) -> Any:
'''simple docstring'''
super().__init__()
lowercase : str =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCAmelCase , d_kv=UpperCAmelCase , num_heads=UpperCAmelCase , dropout_rate=UpperCAmelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCAmelCase , d_kv=UpperCAmelCase , num_heads=UpperCAmelCase , dropout_rate=UpperCAmelCase , layer_norm_epsilon=UpperCAmelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCAmelCase , d_ff=UpperCAmelCase , dropout_rate=UpperCAmelCase , layer_norm_epsilon=UpperCAmelCase ) )
def A__ ( self : Any , UpperCAmelCase : int , UpperCAmelCase : int=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : int=None , ) -> Tuple:
'''simple docstring'''
lowercase : Optional[int] =self.layer[0](
UpperCAmelCase , conditioning_emb=UpperCAmelCase , attention_mask=UpperCAmelCase , )
if encoder_hidden_states is not None:
lowercase : Union[str, Any] =torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
lowercase : List[str] =self.layer[1](
UpperCAmelCase , key_value_states=UpperCAmelCase , attention_mask=UpperCAmelCase , )
# Apply Film Conditional Feed Forward layer
lowercase : List[str] =self.layer[-1](UpperCAmelCase , UpperCAmelCase )
return (hidden_states,)
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase : Optional[int] =TaLayerNorm(UpperCAmelCase )
lowercase : List[str] =TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCAmelCase )
lowercase : Optional[Any] =Attention(query_dim=UpperCAmelCase , heads=UpperCAmelCase , dim_head=UpperCAmelCase , out_bias=UpperCAmelCase , scale_qk=UpperCAmelCase )
lowercase : Any =nn.Dropout(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[int] =self.layer_norm(UpperCAmelCase )
if conditioning_emb is not None:
lowercase : Tuple =self.FiLMLayer(UpperCAmelCase , UpperCAmelCase )
# Self-attention block
lowercase : Tuple =self.attention(UpperCAmelCase )
lowercase : Dict =hidden_states + self.dropout(UpperCAmelCase )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
super().__init__()
lowercase : str =Attention(query_dim=UpperCAmelCase , heads=UpperCAmelCase , dim_head=UpperCAmelCase , out_bias=UpperCAmelCase , scale_qk=UpperCAmelCase )
lowercase : Any =TaLayerNorm(UpperCAmelCase , eps=UpperCAmelCase )
lowercase : Any =nn.Dropout(UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : List[str]=None , UpperCAmelCase : Any=None , ) -> int:
'''simple docstring'''
lowercase : List[str] =self.layer_norm(UpperCAmelCase )
lowercase : List[Any] =self.attention(
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , attention_mask=attention_mask.squeeze(1 ) , )
lowercase : Tuple =hidden_states + self.dropout(UpperCAmelCase )
return layer_output
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowercase : List[str] =TaDenseGatedActDense(d_model=UpperCAmelCase , d_ff=UpperCAmelCase , dropout_rate=UpperCAmelCase )
lowercase : str =TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCAmelCase )
lowercase : Optional[Any] =TaLayerNorm(UpperCAmelCase , eps=UpperCAmelCase )
lowercase : Any =nn.Dropout(UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=None ) -> int:
'''simple docstring'''
lowercase : List[Any] =self.layer_norm(UpperCAmelCase )
if conditioning_emb is not None:
lowercase : Tuple =self.film(UpperCAmelCase , UpperCAmelCase )
lowercase : List[Any] =self.DenseReluDense(UpperCAmelCase )
lowercase : List[str] =hidden_states + self.dropout(UpperCAmelCase )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase : List[str] =nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
lowercase : Dict =nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
lowercase : Optional[Any] =nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
lowercase : Optional[int] =nn.Dropout(UpperCAmelCase )
lowercase : Union[str, Any] =NewGELUActivation()
def A__ ( self : List[str] , UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : int =self.act(self.wi_a(UpperCAmelCase ) )
lowercase : Optional[Any] =self.wi_a(UpperCAmelCase )
lowercase : Dict =hidden_gelu * hidden_linear
lowercase : Optional[int] =self.dropout(UpperCAmelCase )
lowercase : Union[str, Any] =self.wo(UpperCAmelCase )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : str , UpperCAmelCase : Tuple=1e-6 ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowercase : str =nn.Parameter(torch.ones(UpperCAmelCase ) )
lowercase : Union[str, Any] =eps
def A__ ( self : List[Any] , UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
lowercase : Optional[int] =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCAmelCase )
lowercase : Dict =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowercase : Union[str, Any] =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
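# Note on the class above: this is T5-style RMSNorm. Activations are rescaled
# by their root mean square only -- no mean subtraction and no bias term.
# A minimal functional sketch of the same computation (hypothetical tensor
# `h`, learned vector `weight`, small constant `eps`):
#   variance = h.to(torch.float32).pow(2).mean(-1, keepdim=True)
#   h = h * torch.rsqrt(variance + eps)
#   out = weight * h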
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def A__ ( self : Union[str, Any] , UpperCAmelCase : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(UpperCAmelCase , 3.0 )) ))
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
super().__init__()
lowercase : Any =nn.Linear(UpperCAmelCase , out_features * 2 , bias=UpperCAmelCase )
def A__ ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =self.scale_bias(UpperCAmelCase )
lowercase : Dict =torch.chunk(UpperCAmelCase , 2 , -1 )
lowercase : Optional[int] =x * (1 + scale) + shift
return x
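# Illustrative FiLM (feature-wise linear modulation) sketch, mirroring the
# layer above: a conditioning vector is projected to a per-feature
# (scale, shift) pair and applied as x * (1 + scale) + shift. The dimensions
# below are arbitrary example values, not taken from any model config.
if __name__ == "__main__":
    d_model, cond_dim = 8, 32
    film = nn.Linear(cond_dim, d_model * 2, bias=False)
    x = torch.randn(2, 5, d_model)  # (batch, sequence, features)
    cond = torch.randn(2, 1, cond_dim)  # conditioning embedding
    scale, shift = torch.chunk(film(cond), 2, dim=-1)
    out = x * (1 + scale) + shift  # broadcasts over the sequence axis
    assert out.shape == x.shape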
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
            # Create a dummy config file with image_processor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Dict =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
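# Minimal sketch of the fixture pattern the tests above rely on: an image
# processor can be resolved from a bare `preprocessor_config.json` written
# to a local folder (paths and values here are illustrative only):
#   with tempfile.TemporaryDirectory() as tmp:
#       config = {"image_processor_type": "CLIPImageProcessor"}
#       (Path(tmp) / "preprocessor_config.json").write_text(json.dumps(config))
#       processor = AutoImageProcessor.from_pretrained(tmp)
#       # type(processor).__name__ == "CLIPImageProcessor"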
| 8 | 0 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : Callable , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[dict] = None , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : str , ) -> str:
'''simple docstring'''
super().__init__(
features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
lowercase : Tuple =Generator(
cache_dir=UpperCAmelCase , features=UpperCAmelCase , generator=UpperCAmelCase , gen_kwargs=UpperCAmelCase , **UpperCAmelCase , )
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
if self.streaming:
lowercase : Optional[Any] =self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
lowercase : Union[str, Any] =None
lowercase : Dict =None
lowercase : Union[str, Any] =None
lowercase : Tuple =None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
lowercase : Optional[Any] =self.builder.as_dataset(
split='''train''' , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
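# Hedged usage sketch: this reader backs the public
# `datasets.Dataset.from_generator` entry point, e.g.
#   from datasets import Dataset
#
#   def gen():
#       for i in range(3):
#           yield {"id": i, "text": f"example {i}"}
#
#   ds = Dataset.from_generator(gen)  # ds[0] == {"id": 0, "text": "example 0"}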
| 711 |
'''simple docstring'''
from __future__ import annotations
SCREAMING_SNAKE_CASE = 8.988E9 # units = N * m^2 * C^-2
def lowercase_ ( __A : float , __A : float , __A : float , __A : float ) -> dict[str, float]:
"""simple docstring"""
lowercase : Dict =abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
lowercase : Union[str, Any] =COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
lowercase : int =abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
lowercase : int =abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
lowercase : Tuple =(COULOMBS_CONSTANT * charge_product / abs(__A )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
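# Worked examples for the solver above (obfuscated here as `lowercase_`):
# exactly one argument must be 0, and that quantity is solved for.
#   lowercase_(0, 1, 1, 1)  # -> {"force": 8.988e9};        F = k*q1*q2/d^2
#   lowercase_(1, 1, 1, 0)  # -> {"distance": ~94805.06};   d = sqrt(k*q1*q2/F)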
| 8 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : int , *UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCAmelCase : List[str] ):
'''simple docstring'''
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , **UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
return {}, {}, {}
def A__ ( self : Optional[Any] , UpperCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =load_image(UpperCAmelCase )
lowercase : Optional[Any] =image.size
lowercase : int =self.image_processor(images=UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self : str , UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =self.model(**UpperCAmelCase )
return model_outputs
def A__ ( self : Tuple , UpperCAmelCase : List[str] ):
'''simple docstring'''
lowercase : List[str] =model_outputs.predicted_depth
lowercase : List[str] =torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=UpperCAmelCase )
lowercase : List[Any] =prediction.squeeze().cpu().numpy()
lowercase : Union[str, Any] =(output * 255 / np.max(UpperCAmelCase )).astype('''uint8''' )
lowercase : Dict =Image.fromarray(UpperCAmelCase )
lowercase : Optional[Any] ={}
lowercase : Union[str, Any] =predicted_depth
lowercase : int =depth
return output_dict
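# Hedged usage sketch for the pipeline above; the model id and image URL are
# illustrative placeholders, not pinned anywhere in this file:
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # PIL image scaled to 0-255
#   result["predicted_depth"].shape    # raw torch tensor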
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_ ( ) -> Any:
"""simple docstring"""
lowercase : int =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowercase : Union[str, Any] =bs[:]
lowercase : Tuple =0
for b in range(2**8 ):
if b not in bs:
bs.append(__A )
cs.append(2**8 + n )
n += 1
lowercase : Optional[Any] =[chr(__A ) for n in cs]
return dict(zip(__A , __A ) )
def lowercase_ ( __A : str ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[Any] =set()
lowercase : Tuple =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase : List[str] =char
return pairs
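# Example for the helper above (referenced as `get_pairs` inside the
# tokenizer class below): it collects the adjacent symbol pairs that drive
# BPE merge selection, e.g.
#   get_pairs(("h", "e", "l", "l", "o"))
#   # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}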
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
'''simple docstring'''
lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase : str =json.load(UpperCAmelCase )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowercase : Optional[int] =errors # how to handle errors in decoding
lowercase : Tuple =bytes_to_unicode()
lowercase : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[int] ={}
lowercase : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : List[str] =get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
            first , second =bigram
lowercase : Union[str, Any] =[]
lowercase : Optional[Any] =0
while i < len(UpperCAmelCase ):
try:
lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Optional[int] =j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : str =new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
lowercase : Union[str, Any] =word
return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
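# Sketch of the LED-specific padding implemented in `_pad` above: when
# `tokenizer.pad` lengthens the inputs, `global_attention_mask` is extended
# with -1 on the padding side, since 0 already means "local attention"
# rather than "do not attend". `tok` below is an assumed tokenizer instance:
#   enc = {"input_ids": [0, 713, 2], "global_attention_mask": [1, 0, 0]}
#   tok.pad(enc, padding="max_length", max_length=6)
#   # -> global_attention_mask == [1, 0, 0, -1, -1, -1]  (right padding)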
| 8 | 0 |
| 713 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =list(poly_a or [0] )[:]
lowercase : Dict =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : int =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : List[str] =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : Tuple =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Optional[int] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : str =self.__multiply()
def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase : Tuple =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase ) <= 1:
return dft[0]
        # Radix-2 DFT: repeatedly halve the number of columns until one remains
lowercase : List[Any] =self.c_max_length // 2
while next_ncol > 0:
lowercase : str =[[] for i in range(UpperCAmelCase )]
lowercase : List[str] =self.root**next_ncol
# First half of next step
lowercase : Union[str, Any] =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : Any =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Tuple =new_dft
lowercase : List[Any] =next_ncol // 2
return dft[0]
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.__dft('''A''' )
lowercase : Union[str, Any] =self.__dft('''B''' )
lowercase : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Tuple =2
while next_ncol <= self.c_max_length:
lowercase : Tuple =[[] for i in range(UpperCAmelCase )]
lowercase : Tuple =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : List[str] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
        lowercase : Optional[Any] ='''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        lowercase : List[str] ='''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        lowercase : Optional[Any] ='''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
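# Worked example for the FFT multiplier above: with poly_a = [1, 2, 3]
# (i.e. 1 + 2x + 3x^2) and poly_b = [4, 5] (i.e. 4 + 5x), the computed
# `product` holds the coefficients of 4 + 13x + 22x^2 + 15x^3, up to
# rounding of the complex parts.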
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
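# Context for the imports above: RoFormer's distinguishing feature is the
# rotary position embedding applied inside self-attention. A minimal NumPy
# sketch of the rotation (illustrative only -- names and layout are not
# taken from the library implementation):
import numpy as np


def rotary_sketch(x: np.ndarray, pos: int, dim: int) -> np.ndarray:
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = pos * inv_freq  # one angle per feature pair
    sin, cos = np.sin(angles), np.cos(angles)
    x1, x2 = x[0::2], x[1::2]  # rotate consecutive feature pairs
    return np.stack([x1 * cos - x2 * sin, x1 * sin + x2 * cos], -1).reshape(-1)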
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
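# Illustrative check (not part of the original test): the expected weights
# follow the standard sinusoidal formula. For embedding_dim d, column j of the
# first d/2 columns holds sin(pos / 10000**(2j/d)) and the last d/2 hold the
# matching cos values, so at pos=1 with d=6 the first and fourth entries are
# sin(1) ~= 0.8415 and cos(1) ~= 0.5403, as asserted above.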
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
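# Illustrative sketch (not part of the original tests): the rotary update that
# apply_rotary_position_embeddings performs can be re-derived in plain NumPy.
# The helper name below is hypothetical, and the [sin | cos] weight layout is
# an assumption based on the sinusoidal embedding tests above.
import numpy as np


def rotary_rotate_demo(x, sinusoidal):
    # x: (seq_len, dim); sinusoidal: (seq_len, dim) laid out as [sin | cos]
    sin, cos = np.split(sinusoidal, 2, axis=-1)
    sin = np.repeat(sin, 2, axis=-1)  # broadcast each angle over a feature pair
    cos = np.repeat(cos, 2, axis=-1)
    # rotate_half: map (x0, x1, x2, x3, ...) to (-x1, x0, -x3, x2, ...)
    rotated = np.stack([-x[..., 1::2], x[..., ::2]], axis=-1).reshape(x.shape)
    return x * cos + rotated * sin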
| 714 |
'''simple docstring'''
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : List[Any] =str(bin(__A ) )
binary_number += "0" * shift_amount
return binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : Union[str, Any] =str(bin(__A ) )[2:]
if shift_amount >= len(__A ):
return "0b0"
lowercase : Any =binary_number[: len(__A ) - shift_amount]
return "0b" + shifted_binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
lowercase : str ='''0''' + str(bin(__A ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase : Dict =len(bin(__A )[3:] ) # Find 2's complement of number
lowercase : Optional[Any] =bin(abs(__A ) - (1 << binary_number_length) )[3:]
lowercase : int =(
'''1''' + '''0''' * (binary_number_length - len(__A )) + binary_number
)
if shift_amount >= len(__A ):
return "0b" + binary_number[0] * len(__A )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__A ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
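# Illustrative sketch (not part of the original module): the three functions
# above all share one obfuscated name in this file, so the hypothetical
# re-derivations below use distinct names to show the intended behavior.
def logical_left_shift_demo(number: int, shift: int) -> str:
    return bin(number) + "0" * shift


def logical_right_shift_demo(number: int, shift: int) -> str:
    bits = bin(number)[2:]
    return "0b0" if shift >= len(bits) else "0b" + bits[: len(bits) - shift]


def arithmetic_right_shift_demo(number: int, shift: int) -> str:
    width = max(number.bit_length() + 1, shift + 1)  # leave room for a sign bit
    bits = format(number & ((1 << width) - 1), f"0{width}b")  # two's complement
    return "0b" + bits[0] * shift + bits[: width - shift]


assert logical_left_shift_demo(1, 2) == "0b100"
assert logical_right_shift_demo(8, 2) == "0b10"
assert arithmetic_right_shift_demo(-8, 2) == "0b11110"  # sign bit replicated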
| 8 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int]=13 , UpperCAmelCase : Dict=[30, 30] , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Any=3 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : int=32 , UpperCAmelCase : Any=5 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : str=37 , UpperCAmelCase : Optional[Any]="gelu" , UpperCAmelCase : str=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : str=10 , UpperCAmelCase : Optional[int]=0.0_2 , UpperCAmelCase : Dict=3 , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[int]=8 , UpperCAmelCase : Union[str, Any]=10 , ) -> Optional[int]:
'''simple docstring'''
lowercase : int =parent
lowercase : str =batch_size
lowercase : List[Any] =image_size
lowercase : Tuple =patch_size
lowercase : str =num_channels
lowercase : int =is_training
lowercase : Dict =use_labels
lowercase : Dict =hidden_size
lowercase : Any =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : Optional[int] =intermediate_size
lowercase : int =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : int =attention_probs_dropout_prob
lowercase : List[str] =type_sequence_label_size
lowercase : Any =initializer_range
lowercase : str =num_labels
lowercase : str =scope
lowercase : Dict =n_targets
lowercase : Any =num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase : int =(image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase : Tuple =num_patches + 1 + self.num_detection_tokens
def A__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase : Dict =None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase : Union[str, Any] =[]
for i in range(self.batch_size ):
lowercase : Tuple ={}
lowercase : str =torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCAmelCase )
lowercase : List[Any] =torch.rand(self.n_targets , 4 , device=UpperCAmelCase )
labels.append(UpperCAmelCase )
lowercase : Any =self.get_config()
return config, pixel_values, labels
def A__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def A__ ( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Optional[Any] =YolosModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def A__ ( self : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
lowercase : Tuple =YolosForObjectDetection(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Tuple =model(pixel_values=UpperCAmelCase )
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase : str =model(pixel_values=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def A__ ( self : str ) -> List[Any]:
'''simple docstring'''
lowercase : Dict =self.prepare_config_and_inputs()
lowercase : Union[str, Any] =config_and_inputs
lowercase : Union[str, Any] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
UpperCamelCase_ = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : int=False ) -> Any:
'''simple docstring'''
lowercase : Dict =super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase : Any =[]
for i in range(self.model_tester.batch_size ):
lowercase : Optional[Any] ={}
lowercase : int =torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCAmelCase , dtype=torch.long )
lowercase : Tuple =torch.ones(
self.model_tester.n_targets , 4 , device=UpperCAmelCase , dtype=torch.float )
labels.append(UpperCAmelCase )
lowercase : int =labels
return inputs_dict
def A__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowercase : List[str] =YolosModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
def A__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[int] =model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase : List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self : List[Any] ) -> Any:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Any =model_class(UpperCAmelCase )
lowercase : List[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : str =[*signature.parameters.keys()]
lowercase : Optional[int] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Any =True
# in YOLOS, the seq_len is different
lowercase : Union[str, Any] =self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase : Optional[int] =True
lowercase : int =False
lowercase : Any =True
lowercase : Optional[Any] =model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase : Optional[int] =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase : List[Any] =outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase : Dict =True
lowercase : List[Any] =model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase : List[str] =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase : int =outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase : Dict =len(UpperCAmelCase )
# Check attention is always last and order is fine
lowercase : Optional[int] =True
lowercase : Tuple =True
lowercase : Dict =model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase : Tuple =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase : Optional[int] =1
self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase ) )
lowercase : Optional[int] =outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def A__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] ):
lowercase : List[Any] =model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase : Dict =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase : Optional[Any] =outputs.hidden_states
lowercase : Any =getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# YOLOS has a different seq_length
lowercase : int =self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[Any] =True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Dict =True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCAmelCase )
@slow
def A__ ( self : Any ) -> int:
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : str =YolosModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowercase_ ( ) -> List[str]:
"""simple docstring"""
lowercase : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] =YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCAmelCase )
lowercase : Optional[int] =self.default_image_processor
lowercase : Optional[Any] =prepare_img()
lowercase : Tuple =image_processor(images=UpperCAmelCase , return_tensors='''pt''' ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase : Optional[Any] =model(inputs.pixel_values )
# verify outputs
lowercase : Dict =torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase : Optional[int] =torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=UpperCAmelCase , )
lowercase : str =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
# verify postprocessing
lowercase : int =image_processor.post_process_object_detection(
UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase : Union[str, Any] =torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCAmelCase )
lowercase : List[Any] =[75, 75, 17, 63, 17]
lowercase : Union[str, Any] =torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(UpperCAmelCase )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCAmelCase , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCAmelCase )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCAmelCase ) )
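# Illustrative arithmetic (not part of the test): with the tester defaults
# above, image_size=[30, 30], patch_size=2 and num_detection_tokens=10 give
#   num_patches = (30 // 2) * (30 // 2) = 225
#   expected_seq_len = 225 + 1 (CLS token) + 10 detection tokens = 236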
| 715 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =pipeline(
'''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Optional[Any] =INVOICE_URL
lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
lowercase : Dict ='''What is the placebo?'''
lowercase : Optional[Any] =[
{
'''image''': load_image(UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
self.assertEqual(
UpperCAmelCase , [
[
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowercase : Union[str, Any] =INVOICE_URL
lowercase : Tuple ='''How many cats are there?'''
lowercase : Optional[int] =[
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
# This image contains no detectable text, so layoutlmv2 should return an
# empty answer.
lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
# We can optionally pass the words and bounding boxes directly
lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Dict =[]
lowercase : str =[]
lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowercase : Dict =INVOICE_URL
lowercase : str ='''What is the invoice number?'''
lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowercase : Dict =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Any =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : str ) -> Dict:
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : Tuple =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
lowercase : Tuple =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : str =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : Dict =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : List[Any] =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
lowercase : str =INVOICE_URL
lowercase : int ='''What is the invoice number?'''
lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Union[str, Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowercase : Any =INVOICE_URL
lowercase : Union[str, Any] ='''What is the invoice number?'''
lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
pass
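# Minimal usage sketch (illustrative, not part of the test suite): the same
# pipeline exercised above can be driven directly against the pinned invoice
# image; this assumes network access plus the vision and pytesseract extras.
if __name__ == "__main__":
    dqa_demo = pipeline('''document-question-answering''' , model='''impira/layoutlm-document-qa''' )
    print(dqa_demo(image=SCREAMING_SNAKE_CASE , question='''What is the invoice number?''' , top_k=2 ) )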
| 8 | 0 |
'''simple docstring'''
def lowercase_ ( __A : str ) -> Tuple: # noqa: E741
"""simple docstring"""
lowercase : Optional[int] =len(__A )
lowercase : Optional[Any] =0
lowercase : Any =[0] * n
lowercase : List[Any] =[False] * n
lowercase : List[Any] =[False] * n
def dfs(__A : Any , __A : int , __A : Tuple , __A : Any ):
if parent == root:
out_edge_count += 1
lowercase : List[str] =True
lowercase : Tuple =at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
lowercase : int =dfs(__A , __A , __A , __A )
lowercase : Optional[int] =min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
lowercase : List[str] =True
# AP found via cycle
if at == low[to]:
lowercase : Optional[int] =True
else:
lowercase : int =min(low[at] , __A )
return out_edge_count
for i in range(__A ):
if not visited[i]:
lowercase : Optional[Any] =0
lowercase : List[str] =dfs(__A , __A , -1 , __A )
lowercase : Dict =out_edge_count > 1
for x in range(len(__A ) ):
if is_art[x] is True:
print(__A )
# Adjacency list of graph
SCREAMING_SNAKE_CASE = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
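# Illustrative note (not part of the original script): for the adjacency list
# above, the articulation points are 2, 3 and 5. Removing 2 splits the graph
# into {0, 1}, {3, 4} and {5, 6, 7, 8}; removing 3 isolates vertex 4; and
# removing 5 cuts {6, 7, 8} off from the rest.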
| 716 |
'''simple docstring'''
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__A ) , __A )
return number - int(__A )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : List[Any] , UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
lowercase : str =model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self : List[Any] ) -> int:
'''simple docstring'''
lowercase : Any ='''sshleifer/tiny-gpt2'''
lowercase : int =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase , multi_process=UpperCAmelCase , )
lowercase : List[Any] =TensorFlowBenchmark(UpperCAmelCase )
lowercase : int =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self : List[str] ) -> int:
'''simple docstring'''
lowercase : Dict ='''sgugger/tiny-distilbert-classification'''
lowercase : Tuple =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , only_pretrain_model=UpperCAmelCase , )
lowercase : List[str] =TensorFlowBenchmark(UpperCAmelCase )
lowercase : Union[str, Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] ='''sshleifer/tiny-gpt2'''
lowercase : Dict =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
lowercase : int =TensorFlowBenchmark(UpperCAmelCase )
lowercase : Dict =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : List[Any] ='''sshleifer/tiny-gpt2'''
lowercase : Dict =AutoConfig.from_pretrained(UpperCAmelCase )
lowercase : str =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase , multi_process=UpperCAmelCase , )
lowercase : Union[str, Any] =TensorFlowBenchmark(UpperCAmelCase , [config] )
lowercase : Optional[Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self : Any ) -> Dict:
'''simple docstring'''
lowercase : List[str] ='''sshleifer/tiny-gpt2'''
lowercase : Any =AutoConfig.from_pretrained(UpperCAmelCase )
lowercase : Dict =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
lowercase : Optional[Any] =TensorFlowBenchmark(UpperCAmelCase , [config] )
lowercase : Dict =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self : Any ) -> Tuple:
'''simple docstring'''
lowercase : Optional[int] ='''sshleifer/tiny-gpt2'''
lowercase : Union[str, Any] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
lowercase : Tuple =TensorFlowBenchmark(UpperCAmelCase )
lowercase : Union[str, Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str ='''sshleifer/tiny-gpt2'''
lowercase : int =AutoConfig.from_pretrained(UpperCAmelCase )
lowercase : Dict =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
lowercase : Any =TensorFlowBenchmark(UpperCAmelCase , [config] )
lowercase : Dict =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowercase : Union[str, Any] ='''patrickvonplaten/t5-tiny-random'''
lowercase : str =AutoConfig.from_pretrained(UpperCAmelCase )
lowercase : Optional[Any] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
lowercase : int =TensorFlowBenchmark(UpperCAmelCase , configs=[config] )
lowercase : int =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def A__ ( self : int ) -> Dict:
'''simple docstring'''
lowercase : List[Any] ='''sshleifer/tiny-gpt2'''
lowercase : str =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCAmelCase , multi_process=UpperCAmelCase , )
lowercase : int =TensorFlowBenchmark(UpperCAmelCase )
lowercase : Union[str, Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str ='''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : List[str] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase , save_to_csv=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(UpperCAmelCase , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(UpperCAmelCase , '''env.csv''' ) , multi_process=UpperCAmelCase , )
lowercase : Optional[Any] =TensorFlowBenchmark(UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , '''env.csv''' ) ).exists() )
def A__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : int ='''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(UpperCAmelCase : List[Any] ):
self.assertTrue(hasattr(UpperCAmelCase , '''sequential''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''cumulative''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''current''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Union[str, Any] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase , '''log.txt''' ) , log_print=UpperCAmelCase , trace_memory_line_by_line=UpperCAmelCase , eager_mode=UpperCAmelCase , multi_process=UpperCAmelCase , )
lowercase : Any =TensorFlowBenchmark(UpperCAmelCase )
lowercase : Optional[int] =benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(UpperCAmelCase , '''log.txt''' ) ).exists() )
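# Minimal standalone sketch (illustrative, not part of the test suite),
# mirroring the argument pattern exercised above: benchmark one tiny
# checkpoint for inference time and memory.
if __name__ == "__main__":
    demo_args = TensorFlowBenchmarkArguments(
        models=['''sshleifer/tiny-gpt2'''] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
    demo_results = TensorFlowBenchmark(demo_args).run()
    print(demo_results.time_inference_result)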
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : List[Any] , __A : int , __A : int ) -> Optional[int]:
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
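# Worked example (illustrative): the helper above (invoked as normalize_box
# later in this file) maps pixel boxes into the 0-1000 coordinate space that
# LayoutLM expects. For a 1000x500 image,
#   normalize_box([100, 200, 150, 220], 1000, 500) == [100, 400, 150, 440]
# since x-coordinates scale by 1000/width and y-coordinates by 1000/height.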
def lowercase_ ( __A : np.ndarray , __A : Optional[str] , __A : Optional[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase : int =to_pil_image(__A )
lowercase , lowercase : Tuple =pil_image.size
lowercase : Optional[Any] =pytesseract.image_to_data(__A , lang=__A , output_type='''dict''' , config=__A )
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[Any] =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowercase : Dict =[idx for idx, word in enumerate(__A ) if not word.strip()]
lowercase : str =[word for idx, word in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : Optional[int] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : List[Any] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : str =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : int =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase : Tuple =[]
for x, y, w, h in zip(__A , __A , __A , __A ):
lowercase : str =[x, y, x + w, y + h]
actual_boxes.append(__A )
# finally, normalize the bounding boxes
lowercase : List[str] =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(__A , __A , __A ) )
assert len(__A ) == len(__A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
lowercase : Optional[Any] =do_resize
lowercase : List[Any] =size
lowercase : List[str] =resample
lowercase : Dict =do_rescale
lowercase : str =rescale_value
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase : List[Any] =apply_ocr
lowercase : Union[str, Any] =ocr_lang
lowercase : str =tesseract_config
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase : Optional[Any] =(size['''height'''], size['''width'''])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowercase : Tuple =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
lowercase : List[str] =resample if resample is not None else self.resample
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : str =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : int =[]
lowercase : Tuple =[]
for image in images:
lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
if apply_ocr:
lowercase : int =words_batch
lowercase : List[str] =boxes_batch
return data
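# Minimal usage sketch (illustrative; the class keeps this file's obfuscated
# name, and the instantiation below is an assumption based on its defaults):
# with Pillow and pytesseract installed, the processor resizes, rescales and
# normalizes, and, when apply_ocr stays enabled, attaches OCR words and
# normalized boxes to the returned batch.
#
# processor = UpperCAmelCase_()
# encoding = processor(images=PIL.Image.open('''invoice.png''' ) , return_tensors='''np''' )
# # encoding holds '''pixel_values''' plus '''words''' and '''boxes'''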
| 8 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
SCREAMING_SNAKE_CASE = 'pt'
elif is_tf_available():
SCREAMING_SNAKE_CASE = 'tf'
else:
SCREAMING_SNAKE_CASE = 'jax'
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ByTaTokenizer
UpperCamelCase_ = False
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
lowercase : str =ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def A__ ( self : str , **UpperCAmelCase : Any ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A__ ( self : str , UpperCAmelCase : Any , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=20 , UpperCAmelCase : Union[str, Any]=5 ) -> Tuple[str, list]:
'''simple docstring'''
lowercase : int =[]
for i in range(len(UpperCAmelCase ) ):
try:
lowercase : Any =tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase : Any =list(filter(lambda UpperCAmelCase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , UpperCAmelCase ) )
lowercase : Union[str, Any] =list(filter(lambda UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase ) , UpperCAmelCase ) )
if max_length is not None and len(UpperCAmelCase ) > max_length:
lowercase : Dict =toks[:max_length]
if min_length is not None and len(UpperCAmelCase ) < min_length and len(UpperCAmelCase ) > 0:
while len(UpperCAmelCase ) < min_length:
lowercase : Dict =toks + toks
# toks_str = [t[1] for t in toks]
lowercase : Union[str, Any] =[t[0] for t in toks]
# Ensure consistency
lowercase : Optional[Any] =tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
if " " not in output_txt and len(UpperCAmelCase ) > 1:
lowercase : List[str] =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase )
)
if with_prefix_space:
lowercase : Any =''' ''' + output_txt
lowercase : Optional[int] =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
return output_txt, output_ids
def A__ ( self : Optional[int] ) -> str:
'''simple docstring'''
lowercase : Any =self.ta_base_tokenizer
lowercase : str =tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
lowercase : int =tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def A__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[int] =self.ta_base_tokenizer
lowercase : List[str] ='''Unicode €.'''
lowercase : Optional[Any] =tokenizer(UpperCAmelCase )
lowercase : List[Any] =[88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , UpperCAmelCase )
# decoding
lowercase : List[str] =tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , '''Unicode €.</s>''' )
lowercase : Dict =tokenizer('''e è é ê ë''' )
lowercase : List[str] =[104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , UpperCAmelCase )
# decoding
lowercase : str =tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def A__ ( self : Optional[int] ) -> str:
'''simple docstring'''
lowercase : Tuple =self.ta_base_tokenizer
lowercase : Optional[Any] =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowercase : int =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase : Dict =tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase : List[Any] =list(batch.input_ids.numpy()[0] )
else:
lowercase : int =list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def A__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Union[str, Any] =self.ta_base_tokenizer
lowercase : Any =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase : Dict =tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , UpperCAmelCase )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertNotIn('''decoder_input_ids''' , UpperCAmelCase )
self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : int =self.ta_base_tokenizer
lowercase : Dict =[
'''Summary of the text.''',
'''Another summary.''',
]
lowercase : List[Any] =tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding='''max_length''' , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def A__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] =self.ta_base_tokenizer
lowercase : Union[str, Any] =['''A long paragraph for summarization. </s>''']
lowercase : Dict =['''Summary of the text. </s>''']
# fmt: off
lowercase : Tuple =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase : Optional[int] =[86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase : Tuple =tokenizer(UpperCAmelCase , text_target=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , batch['''input_ids'''][0] )
self.assertEqual(UpperCAmelCase , batch['''labels'''][0] )
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowercase : Optional[int] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase : Any =tempfile.mkdtemp()
lowercase : Dict =''' He is very happy, UNwant\u00E9d,running'''
lowercase : Optional[int] =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowercase : str =tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowercase : Dict =after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
lowercase : Tuple =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase : Optional[int] =tempfile.mkdtemp()
lowercase : Dict =''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowercase : Optional[Any] =tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowercase : str =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowercase : Tuple =tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowercase : Optional[int] =after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowercase : List[Any] =tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
lowercase : Optional[int] =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowercase : List[Any] =json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowercase : int =json.load(UpperCAmelCase )
lowercase : List[str] =[f'<extra_id_{i}>' for i in range(125 )]
lowercase : Dict =added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowercase : List[Any] =added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase : Optional[int] =tokenizer_class.from_pretrained(
UpperCAmelCase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase : List[Any] =added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=UpperCAmelCase )]
lowercase : int =tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def A__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
lowercase : Optional[Any] =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
lowercase : Union[str, Any] =tokenizer_class.from_pretrained(UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def A__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def A__ ( self : Any ) -> List[str]:
'''simple docstring'''
pass
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
pass
def A__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowercase : Tuple =self.get_tokenizers(fast=UpperCAmelCase , do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowercase : Dict =['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
lowercase : Optional[int] =tokenizer.convert_tokens_to_string(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowercase : List[str] =[
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowercase : Dict =0
lowercase : Any =tokenizer.convert_ids_to_tokens(
UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
for attr in attributes_list:
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [] )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
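# The expected-id fixtures in the tests above follow a byte-level scheme: each
# UTF-8 byte maps to (byte value + 3), leaving ids 0/1/2 for pad/eos/unk, and an
# eos id (1) is appended. A minimal standalone sketch of that mapping (the helper
# name is illustrative, not part of the tokenizer API) reproduces the
# "Unicode €." ids asserted above:
def _byte_level_encode(text: str, eos_id: int = 1, offset: int = 3) -> list:
    # Encode to UTF-8 bytes, offset each byte past the special tokens, append eos.
    return [b + offset for b in text.encode('utf-8')] + [eos_id]

assert _byte_level_encode('Unicode €.') == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]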
| 718 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Any =13
lowercase : Any =7
lowercase : Optional[int] =True
lowercase : Optional[int] =True
lowercase : Tuple =False
lowercase : Optional[Any] =True
lowercase : Dict =99
lowercase : Union[str, Any] =32
lowercase : Union[str, Any] =2
lowercase : Union[str, Any] =4
lowercase : List[str] =37
lowercase : str ='''gelu'''
lowercase : Dict =0.1
lowercase : List[Any] =0.1
lowercase : List[str] =512
lowercase : Optional[int] =16
lowercase : Optional[Any] =2
lowercase : List[str] =0.0_2
lowercase : Any =3
lowercase : Optional[Any] =4
lowercase : int =None
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Any =None
lowercase : str =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : int =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : str =TFDistilBertModelTester(self )
lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[Any] =model(UpperCAmelCase )[0]
lowercase : str =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Optional[int] =tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
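# A minimal standalone inference sketch mirroring the integration test above.
# Hedged: it needs TensorFlow plus network access to the public
# 'distilbert-base-uncased' checkpoint, so it only runs when executed directly.
if __name__ == "__main__":
    demo_model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
    demo_input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    demo_hidden = demo_model(demo_input_ids)[0]  # last_hidden_state: (1, 6, 768)
    print(demo_hidden.shape)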
| 8 | 0 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =list(poly_a or [0] )[:]
lowercase : Dict =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : int =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : List[str] =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : Tuple =int(
2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Optional[int] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : str =self.__multiply()
def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase : Tuple =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase ) <= 1:
return dft[0]
# next_ncol tracks the column count of the next butterfly stage
lowercase : List[Any] =self.c_max_length // 2
while next_ncol > 0:
lowercase : str =[[] for i in range(UpperCAmelCase )]
lowercase : List[str] =self.root**next_ncol
# First half of next step
lowercase : Union[str, Any] =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : Any =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Tuple =new_dft
lowercase : List[Any] =next_ncol // 2
return dft[0]
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.__dft('''A''' )
lowercase : Union[str, Any] =self.__dft('''B''' )
lowercase : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Tuple =2
while next_ncol <= self.c_max_length:
lowercase : Tuple =[[] for i in range(UpperCAmelCase )]
lowercase : Tuple =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : List[str] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''A = ''' + ''' + '''.join(
f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
lowercase : List[str] ='''B = ''' + ''' + '''.join(
f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
lowercase : Optional[Any] ='''A*B = ''' + ''' + '''.join(
f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
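# A compact standalone cross-check of the same idea using numpy's FFT directly,
# independent of the class above: pointwise-multiply the DFTs and invert. The
# power-of-two padding mirrors the c_max_length computation in __init__; the
# helper name below is illustrative.
def _fft_poly_multiply(poly_a: list, poly_b: list) -> list:
    n = 1 << int(np.ceil(np.log2(len(poly_a) + len(poly_b) - 1)))
    fa = np.fft.fft(poly_a, n)
    fb = np.fft.fft(poly_b, n)
    coeffs = np.fft.ifft(fa * fb).real.round(8)
    return coeffs[: len(poly_a) + len(poly_b) - 1].tolist()

# (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3
assert _fft_poly_multiply([1, 2, 3], [4, 5]) == [4.0, 13.0, 22.0, 15.0]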
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
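# The structure above defers the heavy torch/tf imports until an attribute is
# first accessed. The same idea can be sketched with module-level __getattr__
# (PEP 562) instead of transformers' _LazyModule helper; illustrative only:
#
#     import importlib
#
#     _LAZY = {'RemBertModel': '.modeling_rembert'}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __name__), name)
#         raise AttributeError(f'module {__name__!r} has no attribute {name!r}')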
| 8 | 0 |
'''simple docstring'''
def lowercase_ ( __A : int ) -> list:
"""simple docstring"""
if bit_count < 0:
raise ValueError('''The given input must be non-negative''' )
# get the generated string sequence
lowercase : int =gray_code_sequence_string(__A )
# convert them to integers
for i in range(len(__A ) ):
lowercase : Tuple =int(sequence[i] , 2 )
return sequence
def lowercase_ ( __A : int ) -> list:
"""simple docstring"""
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
lowercase : Tuple =1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
lowercase : Union[str, Any] =gray_code_sequence_string(bit_count - 1 )
lowercase : Optional[Any] =[]
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
lowercase : Union[str, Any] ='''0''' + smaller_sequence[i]
sequence.append(__A )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
lowercase : Tuple ='''1''' + smaller_sequence[i]
sequence.append(__A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
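# Cross-check: the i-th Gray code can also be computed directly as i ^ (i >> 1),
# which agrees with the recursive construction above (helper name illustrative).
def _gray_code_direct(bit_count: int) -> list:
    # For n bits, enumerate 0 .. 2^n - 1 and XOR each index with its half.
    return [i ^ (i >> 1) for i in range(1 << bit_count)]

assert _gray_code_direct(2) == [0, 1, 3, 2]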
| 720 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def lowercase_ ( __A : Union[str, Version] , __A : str , __A : str ) -> Union[str, Any]:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
lowercase : Any =STR_OPERATION_TO_FUNC[operation]
if isinstance(__A , __A ):
lowercase : List[Any] =parse(importlib.metadata.version(__A ) )
return operation(__A , parse(__A ) )
def lowercase_ ( __A : str , __A : str ) -> Tuple:
"""simple docstring"""
return compare_versions(__A , __A , __A )
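# STR_OPERATION_TO_FUNC comes from .constants; it presumably maps operator
# strings onto the standard `operator` module, along these lines (a sketch,
# not the actual constant):
#
#     import operator
#     STR_OPERATION_TO_FUNC = {'>': operator.gt, '>=': operator.ge,
#                              '==': operator.eq, '!=': operator.ne,
#                              '<=': operator.le, '<': operator.lt}
#
# With such a mapping, comparing parse('2.1.0') '>=' '2.0.0' returns True.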
| 8 | 0 |
'''simple docstring'''
def lowercase_ ( __A : int , __A : int , __A : int ) -> float:
"""simple docstring"""
lowercase : Union[str, Any] =(num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def lowercase_ ( ) -> Union[str, Any]:
"""simple docstring"""
print(sum_of_series(1 , 1 , 1_0 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
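# Worked check of the formula with first_term=1, common_diff=1, num_of_terms=10:
# total = (10 / 2) * (2*1 + (10 - 1)*1) = 5 * 11 = 55.0, i.e. 1 + 2 + ... + 10.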
| 721 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( __A : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
lowercase : List[Any] =BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
lowercase : List[str] =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
lowercase : Union[str, Any] =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def lowercase_ ( __A : list[int] , __A : list[int] , __A : int ) -> list[int]:
"""simple docstring"""
lowercase : Tuple =[0] * no_of_processes
lowercase : Any =[0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__A ):
lowercase : List[str] =burst_time[i]
lowercase : Any =0
lowercase : Optional[Any] =0
lowercase : str =9_9_9_9_9_9_9_9_9
lowercase : List[str] =0
lowercase : Tuple =False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__A ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
lowercase : Optional[Any] =remaining_time[j]
lowercase : Optional[int] =j
lowercase : Union[str, Any] =True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
lowercase : Optional[int] =remaining_time[short]
if minm == 0:
lowercase : Optional[Any] =9_9_9_9_9_9_9_9_9
if remaining_time[short] == 0:
complete += 1
lowercase : Optional[int] =False
# Find finish time of current process
lowercase : Dict =increment_time + 1
# Calculate waiting time
lowercase : Tuple =finish_time - arrival_time[short]
lowercase : int =finish_time - arrival_time[short] - burst_time[short]
if waiting_time[short] < 0:
lowercase : List[Any] =0
# Increment time
increment_time += 1
return waiting_time
def lowercase_ ( __A : list[int] , __A : int , __A : list[int] ) -> list[int]:
"""simple docstring"""
lowercase : List[Any] =[0] * no_of_processes
for i in range(__A ):
lowercase : Union[str, Any] =burst_time[i] + waiting_time[i]
return turn_around_time
def lowercase_ ( __A : list[int] , __A : list[int] , __A : int ) -> None:
"""simple docstring"""
lowercase : Tuple =0
lowercase : Tuple =0
for i in range(__A ):
lowercase : Any =total_waiting_time + waiting_time[i]
lowercase : Tuple =total_turn_around_time + turn_around_time[i]
print(F'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
SCREAMING_SNAKE_CASE = int(input())
SCREAMING_SNAKE_CASE = [0] * no_of_processes
SCREAMING_SNAKE_CASE = [0] * no_of_processes
SCREAMING_SNAKE_CASE = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = map(int, input().split())
SCREAMING_SNAKE_CASE = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE = burst_time
SCREAMING_SNAKE_CASE = no_of_processes
SCREAMING_SNAKE_CASE = waiting_time
SCREAMING_SNAKE_CASE = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
SCREAMING_SNAKE_CASE = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
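# Worked SRTF trace (hand-checked) for P1(arrival=0, burst=4), P2(arrival=1, burst=1):
# P1 runs during [0, 1); at t=1, P2's remaining time (1) undercuts P1's (3), so P2
# runs [1, 2) and finishes with waiting time 2 - 1 - 1 = 0; P1 then resumes and
# finishes at t=5 with waiting time 5 - 0 - 4 = 1. Hence waiting times [1, 0] and
# turn-around times [5, 1], giving averages 0.5 and 3.0.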
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
UpperCamelCase_ = field(
default=__A , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
UpperCamelCase_ = field(
default=__A , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
UpperCamelCase_ = field(
default=__A , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Union[str, Any] =super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : List[str] =v.to_dict()
return d
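# Typical construction sketch for the real transformers class this mirrors,
# Seq2SeqTrainingArguments (argument values are illustrative):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir='out',
#         predict_with_generate=True,    # compute eval metrics via generate()
#         generation_max_length=128,
#         generation_num_beams=4,
#     )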
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
def lowercase_ ( __A : list , __A : int , __A : int = 0 , __A : int = 0 ) -> int:
"""simple docstring"""
lowercase : List[Any] =right or len(__A ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__A , __A , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
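# Worked example of the two-ended recursion: searching [1, 3, 5, 7] for key=5
# first checks indices 0 and 3, then recurses with (left=1, right=2) and returns
# 2 because list_data[right] == key. A key that is absent shrinks the window
# until left > right and returns -1.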
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
SCREAMING_SNAKE_CASE = {'allegro/herbert-base-cased': 514}
SCREAMING_SNAKE_CASE = {}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = HerbertTokenizer
def __init__( self : Dict , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : List[Any]="<unk>" , UpperCAmelCase : str="<pad>" , UpperCAmelCase : Optional[Any]="<mask>" , UpperCAmelCase : List[str]="</s>" , **UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sep_token=UpperCAmelCase , **UpperCAmelCase , )
def A__ ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : List[Any] =[self.cls_token_id]
lowercase : Any =[self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Optional[Any] =[self.sep_token_id]
lowercase : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase : List[Any] =self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
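# Special-token layout produced by the methods above (cls='<s>', sep='</s>' per
# __init__): a single sequence becomes `<s> A </s>` with token_type_ids all 0,
# and a pair becomes `<s> A </s> B </s>` with 0s over the first segment and 1s
# over the second, matching create_token_type_ids_from_sequences.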
| 8 | 0 |
'''simple docstring'''
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : List[Any] =str(bin(__A ) )
binary_number += "0" * shift_amount
return binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : Union[str, Any] =str(bin(__A ) )[2:]
if shift_amount >= len(__A ):
return "0b0"
lowercase : Any =binary_number[: len(__A ) - shift_amount]
return "0b" + shifted_binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
lowercase : str ='''0''' + str(bin(__A ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase : Dict =len(bin(__A )[3:] ) # Find 2's complement of number
lowercase : Optional[Any] =bin(abs(__A ) - (1 << binary_number_length) )[3:]
lowercase : int =(
'''1''' + '''0''' * (binary_number_length - len(__A )) + binary_number
)
if shift_amount >= len(__A ):
return "0b" + binary_number[0] * len(__A )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__A ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
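# Hand-checked examples of the three shift functions above:
#   logical left    : number=1,  shift=2 -> '0b1' + '00' gives '0b100' (1 << 2 = 4)
#   logical right   : number=11, shift=2 -> '1011' keeps '10', giving '0b10' (11 >> 2 = 2)
#   arithmetic right: number=-4, shift=1 -> two's complement '1100' sign-extends
#                     to '0b1110', i.e. -2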
| 703 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : Tuple =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Optional[Any] ='''1'''
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : str =self.get_env()
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Any ='''1'''
lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] ='''
from transformers import pipeline
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase : Tuple =self.get_env()
lowercase : Optional[int] ='''1'''
lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def A__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] ='''
from transformers import AutoModel
'''
lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : List[str] ='''1'''
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
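# The socket-patching trick these tests rely on, as a standalone sketch (helper
# name is illustrative): replacing socket.socket makes any network attempt raise
# immediately, so cache-only code paths can be exercised deterministically.
import socket as _socket_for_demo  # noqa: E402

def _disable_network() -> None:
    def offline_socket(*args, **kwargs):
        raise RuntimeError('Offline mode is enabled, network access is blocked')
    _socket_for_demo.socket = offline_socket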
| 8 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : Tuple =mock.Mock()
lowercase : Optional[Any] =500
lowercase : Optional[Any] ={}
lowercase : List[str] =HTTPError
lowercase : List[str] ={}
# Download this model to make sure it's in the cache.
lowercase : Optional[Any] =BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase ) as mock_head:
lowercase : Optional[int] =BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self : Any ) -> Dict:
'''simple docstring'''
lowercase : int =mock.Mock()
lowercase : Dict =500
lowercase : List[Any] ={}
lowercase : Union[str, Any] =HTTPError
lowercase : str ={}
# Download this model to make sure it's in the cache.
lowercase : Optional[Any] =GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase ) as mock_head:
lowercase : Any =GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
try:
lowercase : int =tempfile.mktemp()
with open(UpperCAmelCase , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , UpperCAmelCase )
lowercase : Optional[int] =AlbertTokenizer.from_pretrained(UpperCAmelCase )
finally:
os.remove(UpperCAmelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , UpperCAmelCase )
lowercase : Tuple =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowercase : Dict =AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def A__ ( cls : Any ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def A__ ( cls : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def A__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Union[str, Any] =os.path.join(UpperCAmelCase , '''vocab.txt''' )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : Optional[Any] =BertTokenizer(UpperCAmelCase )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
lowercase : Any =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase , repo_id='''test-tokenizer''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowercase : Any =BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : str =os.path.join(UpperCAmelCase , '''vocab.txt''' )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : str =BertTokenizer(UpperCAmelCase )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
lowercase : List[Any] =BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowercase : Optional[int] =BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : List[Any] =os.path.join(UpperCAmelCase , '''vocab.txt''' )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : List[str] =CustomTokenizer(UpperCAmelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
lowercase : Any =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : int =os.path.join(UpperCAmelCase , '''vocab.txt''' )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : Union[str, Any] =BertTokenizerFast.from_pretrained(UpperCAmelCase )
bert_tokenizer.save_pretrained(UpperCAmelCase )
lowercase : Optional[Any] =CustomTokenizerFast.from_pretrained(UpperCAmelCase )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
lowercase : List[Any] =AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
lowercase : Dict =AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' , use_fast=UpperCAmelCase , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : List[str] =Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def A__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
lowercase : int =Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def A__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowercase : str =Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def A__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def A__ ( self : Dict ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] =Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def A__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : List[str] =Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def A__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] =Trie()
lowercase : Optional[int] =trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(UpperCAmelCase , ['''AB''', '''C'''] )
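# Hedged note on cut_text above: it slices the text at the given offsets while
# skipping zero-width and backward-moving cuts, so the noisy offset list
# [0, 0, 2, 1, 2, 3] still yields the clean pieces ['AB', 'C'].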
| 704 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowercase_ ( __A : str ) -> int:
"""simple docstring"""
lowercase : int ={'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
lowercase : Stack[int] =Stack()
lowercase : Stack[str] =Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__A ) )
elif i in operators:
# RULE 2
operator_stack.push(__A )
elif i == ")":
# RULE 4
lowercase : Optional[Any] =operator_stack.peek()
operator_stack.pop()
lowercase : Optional[Any] =operand_stack.peek()
operand_stack.pop()
lowercase : Optional[Any] =operand_stack.peek()
operand_stack.pop()
lowercase : List[str] =operators[opr](__A , __A )
operand_stack.push(__A )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 | 0 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowercase_ ( __A : int ) -> Optional[int]: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowercase_ ( ) -> Any:
"""simple docstring"""
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
lowercase : List[str] =[1, 2, 3]
with pytest.raises(__A ):
with parallel_backend('''unsupported backend''' ):
map_nested(__A , __A , num_proc=2 )
with pytest.raises(__A ):
with parallel_backend('''unsupported backend''' ):
map_nested(__A , __A , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def lowercase_ ( __A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any =[1, 2]
lowercase : Dict ={'''a''': 1, '''b''': 2}
lowercase : str ={'''a''': [1, 2], '''b''': [3, 4]}
lowercase : List[str] ={'''a''': {'''1''': 1}, '''b''': 2}
lowercase : Union[str, Any] ={'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowercase : str =[2, 3]
lowercase : Dict ={'''a''': 2, '''b''': 3}
lowercase : int ={'''a''': [2, 3], '''b''': [4, 5]}
lowercase : Tuple ={'''a''': {'''1''': 2}, '''b''': 3}
lowercase : Tuple ={'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
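# Hedged sketch of the API under test: `parallel_backend` swaps the executor
# used by `map_nested`, which applies a function to every leaf of a nested
# structure. The helper and data below are illustrative, not from the suite.
def _increment(i: int) -> int: # top-level, hence picklable for multiprocessing
    return i + 1
# map_nested(_increment, {'a': [1, 2], 'b': 3}, num_proc=2) -> {'a': [2, 3], 'b': 4}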
| 705 |
'''simple docstring'''
import re
def lowercase_ ( __A : str ) -> bool:
"""simple docstring"""
lowercase : Any =re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(__A , __A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
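# Hedged examples for the anchored pattern above,
#   ^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$
# which accepts an optional '+91' prefix (followed by '-' or whitespace), an
# optional leading '0' or '91', then a 10-digit number starting with 7, 8 or 9:
#   '+91-9876543210' -> valid
#   '09876543210'    -> valid
#   '+91 6876543210' -> invalid (first subscriber digit 6 is outside [789])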
| 8 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''input_features''', '''attention_mask''']
def __init__( self : Dict , UpperCAmelCase : List[str]=80 , UpperCAmelCase : Optional[int]=1_6000 , UpperCAmelCase : Tuple=80 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : str=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : str=True , **UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase )
lowercase : Dict =num_mel_bins
lowercase : str =do_ceptral_normalize
lowercase : Union[str, Any] =normalize_means
lowercase : Optional[Any] =normalize_vars
lowercase : List[str] =True
def A__ ( self : List[str] , UpperCAmelCase : np.ndarray , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowercase : List[str] =torch.from_numpy(UpperCAmelCase ).unsqueeze(0 )
lowercase : Dict =ta_kaldi.fbank(UpperCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def A__ ( UpperCAmelCase : np.ndarray , UpperCAmelCase : int , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : float = 0.0 , ) -> np.ndarray:
'''simple docstring'''
if normalize_means:
lowercase : str =x[:input_length].mean(axis=0 )
lowercase : Union[str, Any] =np.subtract(UpperCAmelCase , UpperCAmelCase )
if normalize_vars:
lowercase : List[str] =x[:input_length].std(axis=0 )
lowercase : Optional[int] =np.divide(UpperCAmelCase , UpperCAmelCase )
if input_length < x.shape[0]:
lowercase : Any =padding_value
# make sure array is in float32
lowercase : Any =x.astype(np.floataa )
return x
def A__ ( self : Optional[Any] , UpperCAmelCase : List[np.ndarray] , UpperCAmelCase : Optional[np.ndarray] = None ) -> List[np.ndarray]:
'''simple docstring'''
lowercase : Union[str, Any] =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase , UpperCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCAmelCase , UpperCAmelCase )
]
def __call__( self : Optional[Any] , UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , **UpperCAmelCase : List[Any] , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Optional[Any] =isinstance(UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowercase : Optional[int] =is_batched_numpy or (
isinstance(UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] =[np.asarray(UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase , np.ndarray ):
lowercase : int =np.asarray(UpperCAmelCase , dtype=np.floataa )
elif isinstance(UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Tuple =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : Any =[raw_speech]
# extract fbank features
lowercase : Any =[self._extract_fbank_features(UpperCAmelCase ) for waveform in raw_speech]
# convert into correct format for padding
lowercase : Optional[Any] =BatchFeature({'''input_features''': features} )
lowercase : List[str] =self.pad(
UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , )
# make sure list is in array format
lowercase : Any =padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , UpperCAmelCase ):
lowercase : Optional[Any] =[np.asarray(UpperCAmelCase , dtype=np.floataa ) for feature in input_features]
lowercase : Union[str, Any] =padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
lowercase : int =[np.asarray(UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowercase : List[str] =(
np.array(UpperCAmelCase , dtype=np.intaa )
if self._get_padding_strategies(UpperCAmelCase , max_length=UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase : int =self.normalize(
padded_inputs['''input_features'''] , attention_mask=UpperCAmelCase )
if return_tensors is not None:
lowercase : Dict =padded_inputs.convert_to_tensors(UpperCAmelCase )
return padded_inputs
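# Hedged usage sketch for the fbank extractor above; the constructor keywords
# mirror the super().__init__ call, and the concrete shapes are assumptions:
#   import numpy as np
#   extractor = <this class>(feature_size=80, sampling_rate=16_000, padding_value=0.0)
#   waveform = np.zeros(16_000, dtype=np.float32) # one second of mono audio
#   batch = extractor(waveform, sampling_rate=16_000, padding=True, return_tensors='np')
#   batch['input_features'].shape # -> (1, num_frames, 80), plus an 'attention_mask'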
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
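# Hedged standalone sketch of the rotary trick the last test exercises: each
# (even, odd) feature pair of a query/key vector at position m is rotated by
# an angle proportional to m, so dot products depend only on relative
# position. The pairwise interleaving convention below is an assumption, not
# a claim about the exact layout used by apply_rotary_position_embeddings.
import numpy as np
def _rotate_half(x: np.ndarray) -> np.ndarray:
    # map each pair (a, b) to (-b, a), i.e. a 90-degree rotation per pair
    a, b = x[..., 0::2], x[..., 1::2]
    return np.stack([-b, a], axis=-1).reshape(x.shape)
def _apply_rotary(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    # sin/cos hold the per-position angles, repeated once per feature pair
    return x * cos + _rotate_half(x) * sin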
| 8 | 0 |
'''simple docstring'''
import os
SCREAMING_SNAKE_CASE = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1_000}
def lowercase_ ( __A : str ) -> int:
"""simple docstring"""
lowercase : str =0
lowercase : List[Any] =0
while index < len(__A ) - 1:
lowercase : List[Any] =SYMBOLS[numerals[index]]
lowercase : Tuple =SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def lowercase_ ( __A : int ) -> str:
"""simple docstring"""
lowercase : Optional[Any] =''''''
lowercase : List[Any] =num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
lowercase : List[Any] =num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
lowercase : int =num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def lowercase_ ( __A : str = "/p089_roman.txt" ) -> int:
"""simple docstring"""
lowercase : List[str] =0
with open(os.path.dirname(__A ) + roman_numerals_filename ) as filea:
lowercase : List[Any] =filea.readlines()
for line in lines:
lowercase : List[Any] =line.strip()
lowercase : Optional[int] =parse_roman_numerals(__A )
lowercase : int =generate_roman_numerals(__A )
savings += len(__A ) - len(__A )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ = '''LayoutLMv2ImageProcessor'''
UpperCamelCase_ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self : List[str] , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , **UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase , )
lowercase : Any =kwargs.pop('''feature_extractor''' )
lowercase : Dict =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Any , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowercase : Tuple =self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Optional[Any] =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase : List[str] =features['''words''']
lowercase : Optional[Any] =self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel values
lowercase : List[str] =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowercase : str =self.get_overflowing_images(UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
lowercase : Dict =images
return encoded_inputs
def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
'''simple docstring'''
lowercase : str =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}' )
return images_with_overflow
def A__ ( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
return self.image_processor_class
@property
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
return self.image_processor
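# Hedged end-to-end sketch of the processor above: the image processor runs
# OCR (words plus normalized boxes) and the tokenizer aligns them into model
# inputs. The checkpoint name is an assumption for illustration only.
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained('microsoft/layoutxlm-base')
#   encoding = processor(image, return_tensors='pt')
#   encoding.keys() # -> input_ids, bbox, attention_mask, image (see model_input_names)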
| 8 | 0 |
'''simple docstring'''
from PIL import Image
def lowercase_ ( __A : Image , __A : float ) -> Image:
"""simple docstring"""
def brightness(__A : int ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__A )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
SCREAMING_SNAKE_CASE = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
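# Hedged arithmetic note on the point transform above: 128 + level + (c - 128)
# simplifies to c + level, so level = 100 lifts every channel value by 100
# (c = 50 -> 150) before PIL converts the result back to the image's mode.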
| 708 |
'''simple docstring'''
def lowercase_ ( __A : int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
"""simple docstring"""
try:
lowercase : Any =int(__A )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =1
lowercase : Dict =2
while i * i <= n:
while n % i == 0:
lowercase : Optional[int] =i
n //= i
i += 1
if n > 1:
lowercase : Dict =n
return int(__A )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 8 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''sew-d'''
def __init__( self : Optional[Any] , UpperCAmelCase : Any=32 , UpperCAmelCase : str=768 , UpperCAmelCase : List[Any]=12 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : List[Any]=3072 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : str=256 , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : str=("p2c", "c2p") , UpperCAmelCase : Optional[Any]="layer_norm" , UpperCAmelCase : Dict="gelu_python" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : str=0.0_2 , UpperCAmelCase : List[str]=1e-7 , UpperCAmelCase : Dict=1e-5 , UpperCAmelCase : Optional[Any]="group" , UpperCAmelCase : str="gelu" , UpperCAmelCase : str=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCAmelCase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase : int=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Dict=128 , UpperCAmelCase : Union[str, Any]=16 , UpperCAmelCase : int=True , UpperCAmelCase : List[str]=0.0_5 , UpperCAmelCase : Tuple=10 , UpperCAmelCase : Any=2 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Tuple=10 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Optional[Any]="mean" , UpperCAmelCase : Any=False , UpperCAmelCase : int=False , UpperCAmelCase : List[str]=256 , UpperCAmelCase : str=0 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Tuple=2 , **UpperCAmelCase : int , ) -> str:
'''simple docstring'''
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowercase : Union[str, Any] =hidden_size
lowercase : Optional[Any] =feat_extract_norm
lowercase : Dict =feat_extract_activation
lowercase : Optional[Any] =list(UpperCAmelCase )
lowercase : Optional[int] =list(UpperCAmelCase )
lowercase : List[Any] =list(UpperCAmelCase )
lowercase : Dict =conv_bias
lowercase : Any =num_conv_pos_embeddings
lowercase : Tuple =num_conv_pos_embedding_groups
lowercase : List[Any] =len(self.conv_dim )
lowercase : Any =num_hidden_layers
lowercase : List[Any] =intermediate_size
lowercase : Any =squeeze_factor
lowercase : Dict =max_position_embeddings
lowercase : Union[str, Any] =position_buckets
lowercase : List[Any] =share_att_key
lowercase : Optional[int] =relative_attention
lowercase : List[Any] =norm_rel_ebd
lowercase : List[str] =list(UpperCAmelCase )
lowercase : Optional[Any] =hidden_act
lowercase : Optional[int] =num_attention_heads
lowercase : int =hidden_dropout
lowercase : Optional[Any] =attention_dropout
lowercase : Optional[Any] =activation_dropout
lowercase : Union[str, Any] =feat_proj_dropout
lowercase : Union[str, Any] =final_dropout
lowercase : Dict =layer_norm_eps
lowercase : Tuple =feature_layer_norm_eps
lowercase : Optional[Any] =initializer_range
lowercase : Optional[int] =vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase : str =apply_spec_augment
lowercase : str =mask_time_prob
lowercase : int =mask_time_length
lowercase : Tuple =mask_time_min_masks
lowercase : Dict =mask_feature_prob
lowercase : Any =mask_feature_length
lowercase : Dict =mask_feature_min_masks
# ctc loss
lowercase : List[str] =ctc_loss_reduction
lowercase : Dict =ctc_zero_infinity
# sequence classification
lowercase : int =use_weighted_layer_sum
lowercase : Dict =classifier_proj_size
@property
def A__ ( self : int ) -> Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
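# Hedged note on the property above: it multiplies the convolutional strides,
# i.e. the total downsampling factor from raw samples to encoder frames. With
# the default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product is
# 5 * 2**6 = 320, so 16 kHz audio yields 50 feature frames per second.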
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
lowercase : str =u
for i in range(1 , __A ):
lowercase : Any =temp * (u - i)
return temp
def lowercase_ ( ) -> None:
"""simple docstring"""
lowercase : List[str] =int(input('''enter the numbers of values: ''' ) )
lowercase : list[list[float]] =[]
for _ in range(__A ):
y.append([] )
for i in range(__A ):
for j in range(__A ):
y[i].append(__A )
lowercase : List[Any] =0
print('''enter the values of parameters in a list: ''' )
lowercase : Optional[int] =list(map(__A , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(__A ):
lowercase : str =float(input() )
lowercase : int =int(input('''enter the value to interpolate: ''' ) )
lowercase : Union[str, Any] =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , __A ):
for j in range(n - i ):
lowercase : str =y[j + 1][i - 1] - y[j][i - 1]
lowercase : Any =y[0][0]
for i in range(1 , __A ):
summ += (ucal(__A , __A ) * y[0][i]) / math.factorial(__A )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
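# Hedged note on the math above: this is Newton's forward-difference formula
#   P(x) = y0 + u*Dy0 + u(u-1)/2! * D^2y0 + ...,  with u = (x - x0) / h,
# where the D^k y0 terms are the first row of the forward-difference table
# built by the nested loop and ucal(u, k) accumulates u(u-1)...(u-k+1).
# Tiny check with y = x**2 sampled at x = 0, 1, 2 (h = 1), interpolating at
# x = 1.5 (u = 1.5): Dy0 = 1, D^2y0 = 2, so
#   P = 0 + 1.5*1 + (1.5 * 0.5 / 2) * 2 = 2.25 = 1.5**2 exactly.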
| 8 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
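# Hedged sketch of what the lazy pattern above buys: the module object in
# sys.modules is replaced by a _LazyModule that resolves names from the import
# structure on first attribute access, roughly:
#   def __getattr__(name):
#       submodule = next(m for m, names in _import_structure.items() if name in names)
#       return getattr(importlib.import_module('.' + submodule, __name__), name)
# so importing the package never pays the torch import cost up front.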
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
# Create a dummy config file with image_processor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Dict =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
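# Hedged sketch of the registration API the two tests above exercise: pairing
# a config class with an image-processor class lets the auto-API dispatch to
# custom code like any built-in mapping entry.
#   AutoConfig.register('custom', CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   AutoImageProcessor.from_pretrained(folder) # resolves via model_type 'custom'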
| 8 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ViTImageProcessor if is_vision_available() else None
@property
def A__ ( self : Optional[int] ) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : Tuple =(3, 32, 128)
lowercase : str =tempfile.mkdtemp()
# fmt: off
lowercase : Tuple =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase : Union[str, Any] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + '''\n''' )
lowercase : Any ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase : List[Any] =os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Dict , **UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A__ ( self : str , **UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A__ ( self : Optional[int] ) -> int:
'''simple docstring'''
lowercase : Any =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
lowercase : List[Any] =Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) )
return image_input
def A__ ( self : Any ) -> int:
'''simple docstring'''
lowercase : Tuple =self.get_tokenizer()
lowercase : Dict =self.get_image_processor()
lowercase : Tuple =MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase : str =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str =self.get_tokenizer()
lowercase : List[Any] =self.get_image_processor()
lowercase : Dict =MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase : str =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowercase : str =self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowercase : Any =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] =self.get_image_processor()
lowercase : Dict =self.get_tokenizer()
lowercase : List[Any] =MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Any =self.prepare_image_inputs()
lowercase : List[Any] =image_processor(UpperCAmelCase , return_tensors='''np''' )
lowercase : str =processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =self.get_image_processor()
lowercase : List[Any] =self.get_tokenizer()
lowercase : str =MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : List[str] ='''test'''
lowercase : Optional[int] =processor(text=UpperCAmelCase )
lowercase : Union[str, Any] =tokenizer(UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A__ ( self : Optional[int] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =self.get_image_processor()
lowercase : Optional[int] =self.get_tokenizer()
lowercase : Optional[Any] =MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Any ='''test'''
lowercase : Any =self.prepare_image_inputs()
lowercase : str =processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
lowercase : str =self.get_image_processor()
lowercase : Optional[Any] =self.get_tokenizer()
lowercase : Dict =MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Optional[int] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase : Optional[int] =processor.char_decode(UpperCAmelCase )
lowercase : Union[str, Any] =tokenizer.batch_decode(UpperCAmelCase )
lowercase : Tuple =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Tuple ) -> Dict:
'''simple docstring'''
lowercase : str =self.get_image_processor()
lowercase : List[Any] =self.get_tokenizer()
lowercase : Any =MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : str =None
lowercase : Optional[int] =self.prepare_image_inputs()
lowercase : Optional[int] =processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] =self.get_image_processor()
lowercase : Dict =self.get_tokenizer()
lowercase : int =MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : str =torch.randn(1 , 27 , 38 )
lowercase : List[Any] =torch.randn(1 , 27 , 5_0257 )
lowercase : Union[str, Any] =torch.randn(1 , 27 , 3_0522 )
lowercase : Union[str, Any] =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
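# Illustrative round-trip sketch (commented out; 'alibaba-damo/mgp-str-base' is the
# public checkpoint, an assumption not taken from the tests above):
# processor = MgpstrProcessor.from_pretrained('alibaba-damo/mgp-str-base')
# processor.save_pretrained('/tmp/mgp_str_processor')
# reloaded = MgpstrProcessor.from_pretrained('/tmp/mgp_str_processor')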
| 711 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988E9  # units = N * m^2 * C^-2
def lowercase_ ( force : float , chargea : float , chargeb : float , distance : float ) -> dict[str, float]:
    """simple docstring"""
    charge_product =abs(chargea * chargeb )
    if (force, chargea, chargeb, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if distance < 0:
        raise ValueError('''Distance cannot be negative''' )
    if force == 0:
        force =COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif chargea == 0:
        chargea =abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        chargeb =abs(force ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
    elif distance == 0:
        distance =(COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''' )
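# Worked example (illustrative, my own numbers): two 1 C charges one metre apart
# experience a force equal to Coulomb's constant itself.
# >>> lowercase_(0, 1, 1, 1)
# {'force': 8988000000.0}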
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
        'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MBartForCausalLM',
        'MBartForConditionalGeneration',
        'MBartForQuestionAnswering',
        'MBartForSequenceClassification',
        'MBartModel',
        'MBartPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
        'TFMBartForConditionalGeneration',
        'TFMBartModel',
        'TFMBartPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
        'FlaxMBartForConditionalGeneration',
        'FlaxMBartForQuestionAnswering',
        'FlaxMBartForSequenceClassification',
        'FlaxMBartModel',
        'FlaxMBartPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
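# Illustrative note: with the lazy structure above, the import below only loads
# configuration_mbart on first attribute access (commented out; needs transformers):
# from transformers import MBartConfig
# config = MBartConfig()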
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ):
    """simple docstring"""
    bs =(
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs =bs[:]
    n =0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs =[chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
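# Quick sanity check (illustrative): the byte-to-unicode table covers every byte
# value and keeps printable ASCII unchanged.
# >>> table = bytes_to_unicode()
# >>> len(table)
# 256
# >>> table[ord('A')]
# 'A'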
def get_pairs( word : str ):
    """simple docstring"""
    pairs =set()
    prev_char =word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char =char
    return pairs
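# Illustrative example: get_pairs returns the set of adjacent symbol pairs in a word.
# >>> sorted(get_pairs('hello'))
# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]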
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , vocab_file : str , merges_file : str , errors : str="replace" , bos_token : str="<s>" , eos_token : str="</s>" , sep_token : str="</s>" , cls_token : str="<s>" , unk_token : str="<unk>" , pad_token : str="<pad>" , mask_token : str="<mask>" , add_prefix_space : bool=False , **kwargs : int , ) -> Dict:
        '''simple docstring'''
        bos_token =AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token =AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token =AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token =AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token =AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token =AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token =AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder =json.load(vocab_handle )
        self.decoder ={v: k for k, v in self.encoder.items()}
        self.errors =errors  # how to handle errors in decoding
        self.byte_encoder =bytes_to_unicode()
        self.byte_decoder ={v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges =merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges =[tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks =dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache ={}
        self.add_prefix_space =add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ) -> int:
'''simple docstring'''
return len(self.encoder )
    def get_vocab( self ) -> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self : int , token : str ) -> str:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word =tuple(token )
        pairs =get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram =min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second =bigram
            new_word =[]
            i =0
            while i < len(word ):
                try:
                    j =word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i =j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word =tuple(new_word )
            word =new_word
            if len(word ) == 1:
                break
            else:
                pairs =get_pairs(word )
        word =''' '''.join(word )
        self.cache[token] =word
        return word
    def _tokenize( self : int , text : Optional[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        bpe_tokens =[]
        for token in re.findall(self.pat , text ):
            token =''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        text =''''''.join(tokens )
        text =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary( self : Any , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file =os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file =os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index =0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index =token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls =[self.cls_token_id]
        sep =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        '''simple docstring'''
        add_prefix_space =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text =''' ''' + text
        return (text, kwargs)
    def _pad( self , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) -> dict:
        '''simple docstring'''
        encoded_inputs =super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask ='''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input =encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded =len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference =len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] =(
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] =[-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
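# Minimal usage sketch (commented out; downloads vocab files from the Hub):
# from transformers import LEDTokenizer
# tok = LEDTokenizer.from_pretrained('allenai/led-base-16384')
# enc = tok(['short text', 'a much longer piece of text'], padding=True)
# enc['global_attention_mask'] = [[1] + [0] * (len(ids) - 1) for ids in enc['input_ids']]
# # _pad above keeps global_attention_mask aligned by padding it with -1.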
| 8 | 0 |
'''simple docstring'''
from math import pi
def arc_length( angle : int , radius : int ) -> float:
    """simple docstring"""
return 2 * pi * radius * (angle / 3_6_0)
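# Worked example (illustrative): arc_length(90, 10) covers a quarter of the full
# circumference 2 * pi * 10, i.e. 5 * pi ~= 15.7079632679.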
if __name__ == "__main__":
print(arc_length(90, 10))
| 713 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
    def __init__( self , poly_a=None , poly_b=None ):
        '''simple docstring'''
        # Input as lists of coefficients
        self.polyA =list(poly_a or [0] )[:]
        self.polyB =list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A =len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B =len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        self.c_max_length =int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        self.root =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        self.product =self.__multiply()
    def __dft( self , which ):
        '''simple docstring'''
        dft =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol =self.c_max_length // 2
        while next_ncol > 0:
            new_dft =[[] for i in range(next_ncol )]
            root =self.root**next_ncol
            # First half of next step
            current_root =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft =new_dft
            next_ncol =next_ncol // 2
        return dft[0]
    def __multiply( self ):
        '''simple docstring'''
        dft_a =self.__dft('''A''' )
        dft_b =self.__dft('''B''' )
        inverce_c =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol =2
        while next_ncol <= self.c_max_length:
            new_inverse_c =[[] for i in range(next_ncol )]
            root =self.root ** (next_ncol // 2)
            current_root =1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c =new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__( self ):
        '''simple docstring'''
        a ='''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b ='''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c ='''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
        return f'{a}\n{b}\n{c}'
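# Worked example (illustrative): multiplying A(x) = 1 + 2x + 3x^2 by B(x) = 2 + x
# should give 2 + 5x + 8x^2 + 3x^3, i.e. the class above computes product
# coefficients [2, 5, 8, 3] (returned as complex numbers whose imaginary parts
# round to zero):
# >>> UpperCAmelCase_([1, 2, 3], [2, 1]).product
# [(2+0j), (5+0j), (8+0j), (3+0j)]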
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 0 |
'''simple docstring'''
class TrieNode :
    """simple docstring"""
    def __init__( self ) -> None:
        '''simple docstring'''
        self.nodes : dict[str, TrieNode] ={}  # Mapping from char to TrieNode
        self.is_leaf =False
    def insert_many( self , words : list[str] ) -> None:
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self , word : str ) -> None:
        '''simple docstring'''
        curr =self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] =TrieNode()
            curr =curr.nodes[char]
        curr.is_leaf =True
    def find( self , word : str ) -> bool:
        '''simple docstring'''
        curr =self
        for char in word:
            if char not in curr.nodes:
                return False
            curr =curr.nodes[char]
        return curr.is_leaf
    def delete( self , word : str ) -> None:
        '''simple docstring'''
        def _delete(curr : TrieNode , word : str , index : int ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf =False
                return len(curr.nodes ) == 0
            char =word[index]
            char_node =curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr =_delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
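# Quick usage sketch (mirrors test_trie below):
# root = TrieNode()
# root.insert_many(['cat', 'car'])
# root.find('car')  # True
# root.find('ca')   # False -- a prefix is only found once marked as a leaf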
def print_words( node : TrieNode , word : str ) -> None:
    """simple docstring"""
    if node.is_leaf:
        print(word , end=''' ''' )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie( ) -> bool:
    """simple docstring"""
    words ='''banana bananas bandana band apple all beast'''.split()
    root =TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find('''banana''' )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    assert root.find('''apple''' )
    assert root.find('''all''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def print_results( msg : str , passes : bool ) -> None:
    """simple docstring"""
    print(str(msg ) , '''works!''' if passes else '''doesn\'t work :(''' )
def test( ) -> None:
    """simple docstring"""
    assert test_trie()
def main( ) -> None:
    """simple docstring"""
    print_results('''Testing trie functionality''' , test_trie() )
if __name__ == "__main__":
    main()
| 714 |
'''simple docstring'''
def logical_left_shift( number : int , shift_amount : int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number =str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number : int , shift_amount : int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number =str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number =binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number : int , shift_amount : int ) -> str:
    """simple docstring"""
    if number >= 0:  # Get binary representation of positive number
        binary_number ='''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length =len(bin(number )[3:] )  # Find 2's complement of number
        binary_number =bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number =(
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
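# Worked examples (illustrative):
# >>> logical_left_shift(5, 2)
# '0b10100'
# >>> logical_right_shift(20, 3)
# '0b10'
# >>> arithmetic_right_shift(-8, 2)   # -8 >> 2 == -2 in 5-bit two's complement
# '0b11110'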
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip_2'] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
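# Illustrative usage (commented out; the checkpoint name is an assumption and the
# weights are several GB):
# from transformers import Blip2Processor, Blip2ForConditionalGeneration
# processor = Blip2Processor.from_pretrained('Salesforce/blip2-opt-2.7b')
# model = Blip2ForConditionalGeneration.from_pretrained('Salesforce/blip2-opt-2.7b')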
| 715 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =pipeline(
'''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Optional[Any] =INVOICE_URL
lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
lowercase : Dict ='''What is the placebo?'''
lowercase : Optional[Any] =[
{
'''image''': load_image(UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
self.assertEqual(
UpperCAmelCase , [
[
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowercase : Union[str, Any] =INVOICE_URL
lowercase : Tuple ='''How many cats are there?'''
lowercase : Optional[int] =[
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Dict =[]
lowercase : str =[]
lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowercase : Dict =INVOICE_URL
lowercase : str ='''What is the invoice number?'''
lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowercase : Dict =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Any =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : str ) -> Dict:
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : Tuple =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
lowercase : Tuple =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : str =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : Dict =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : List[Any] =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
lowercase : str =INVOICE_URL
lowercase : int ='''What is the invoice number?'''
lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Union[str, Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowercase : Any =INVOICE_URL
lowercase : Union[str, Any] ='''What is the invoice number?'''
lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
pass
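# Minimal pipeline sketch (commented out; downloads a checkpoint and needs
# pytesseract installed -- mirrors the slow tests above):
# from transformers import pipeline
# dqa = pipeline('document-question-answering', model='impira/layoutlm-document-qa')
# dqa(image=INVOICE_URL, question='What is the invoice number?')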
| 8 | 0 |
'''simple docstring'''
def dodecahedron_surface_area( edge : float ) -> float:
    """simple docstring"""
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError('''Length must be a positive number.''' )
    return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge : float ) -> float:
    """simple docstring"""
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError('''Length must be a positive number.''' )
    return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
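# Worked example (illustrative, for a unit edge):
# dodecahedron_surface_area(1) -> 3 * sqrt(25 + 10 * sqrt(5)) ~= 20.6457288
# dodecahedron_volume(1)       -> (15 + 7 * sqrt(5)) / 4      ~= 7.6631190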
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
'''simple docstring'''
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__A ) , __A )
return number - int(__A )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Any =13
lowercase : Any =7
lowercase : Optional[int] =True
lowercase : Optional[int] =True
lowercase : Tuple =False
lowercase : Optional[Any] =True
lowercase : Dict =99
lowercase : Union[str, Any] =32
lowercase : Union[str, Any] =2
lowercase : Union[str, Any] =4
lowercase : List[str] =37
lowercase : str ='''gelu'''
lowercase : Dict =0.1
lowercase : List[Any] =0.1
lowercase : List[str] =512
lowercase : Optional[int] =16
lowercase : Optional[Any] =2
lowercase : List[str] =0.0_2
lowercase : Any =3
lowercase : Optional[Any] =4
lowercase : int =None
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Any =None
lowercase : str =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) =config_and_inputs
        inputs_dict ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : str =TFDistilBertModelTester(self )
lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[Any] =model(UpperCAmelCase )[0]
lowercase : str =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Optional[int] =tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
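# Minimal sanity check (commented out; downloads distilbert-base-uncased and
# mirrors the integration test above):
# model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
# output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
# assert output.shape == (1, 6, 768)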
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    """simple docstring"""
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]
def apply_tesseract( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] ):
    """simple docstring"""
    # apply OCR to a PIL version of the image
    pil_image =to_pil_image(image )
    image_width , image_height =pil_image.size
    data =pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices =[idx for idx, word in enumerate(words ) if not word.strip()]
    words =[word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left =[coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top =[coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width =[coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height =[coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes =[]
    for x, y, w, h in zip(left , top , width , height ):
        actual_box =[x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes =[]
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
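# Illustrative example: normalize_box scales pixel coordinates onto LayoutLM's
# 0-1000 grid, e.g. the box (10, 20, 30, 40) in a 200x400 image becomes
# [50, 50, 150, 100].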
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
lowercase : Optional[Any] =do_resize
lowercase : List[Any] =size
lowercase : List[str] =resample
lowercase : Dict =do_rescale
lowercase : str =rescale_value
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase : List[Any] =apply_ocr
lowercase : Union[str, Any] =ocr_lang
lowercase : str =tesseract_config
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase : Optional[Any] =(size['''height'''], size['''width'''])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowercase : Tuple =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
lowercase : List[str] =resample if resample is not None else self.resample
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : str =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : int =[]
lowercase : Tuple =[]
for image in images:
lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
if apply_ocr:
lowercase : int =words_batch
lowercase : List[str] =boxes_batch
return data
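# A minimal usage sketch for the processor above, assuming the de-obfuscated
# public transformers API (LayoutLMv3ImageProcessor). `apply_ocr=True` would
# additionally require pytesseract to be installed; `apply_ocr=False` keeps the
# snippet self-contained.
import numpy as np
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # stand-in image
_encoding = _processor(_image, return_tensors="pt")
print(_encoding["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])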
| 8 | 0 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : UNetaDModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : DDPMScheduler , UpperCAmelCase : Optional[int] , ) -> str:
'''simple docstring'''
super().__init__()
lowercase : str =value_function
lowercase : Union[str, Any] =unet
lowercase : List[str] =scheduler
lowercase : Optional[Any] =env
lowercase : Any =env.get_dataset()
lowercase : Tuple ={}
for key in self.data.keys():
try:
lowercase : Any =self.data[key].mean()
except: # noqa: E722
pass
lowercase : Tuple ={}
for key in self.data.keys():
try:
lowercase : Tuple =self.data[key].std()
except: # noqa: E722
pass
lowercase : Optional[int] =env.observation_space.shape[0]
lowercase : Dict =env.action_space.shape[0]
def A__ ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def A__ ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def A__ ( self : str , UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
if type(UpperCAmelCase ) is dict:
return {k: self.to_torch(UpperCAmelCase ) for k, v in x_in.items()}
elif torch.is_tensor(UpperCAmelCase ):
return x_in.to(self.unet.device )
return torch.tensor(UpperCAmelCase , device=self.unet.device )
def A__ ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
for key, val in cond.items():
lowercase : Dict =val.clone()
return x_in
def A__ ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
lowercase : Optional[int] =x.shape[0]
lowercase : str =None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase : List[Any] =torch.full((batch_size,) , UpperCAmelCase , device=self.unet.device , dtype=torch.long )
for _ in range(UpperCAmelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase : Dict =self.value_function(x.permute(0 , 2 , 1 ) , UpperCAmelCase ).sample
lowercase : List[Any] =torch.autograd.grad([y.sum()] , [x] )[0]
lowercase : Union[str, Any] =self.scheduler._get_variance(UpperCAmelCase )
lowercase : Any =torch.exp(0.5 * posterior_variance )
lowercase : str =model_std * grad
lowercase : Any =0
lowercase : List[Any] =x.detach()
lowercase : Optional[Any] =x + scale * grad
lowercase : Any =self.reset_xa(UpperCAmelCase , UpperCAmelCase , self.action_dim )
lowercase : Dict =self.unet(x.permute(0 , 2 , 1 ) , UpperCAmelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase : Optional[int] =self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , predict_epsilon=UpperCAmelCase )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
lowercase : Tuple =self.reset_xa(UpperCAmelCase , UpperCAmelCase , self.action_dim )
lowercase : Any =self.to_torch(UpperCAmelCase )
return x, y
def __call__( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str]=64 , UpperCAmelCase : Dict=32 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : List[str]=0.1 ) -> Any:
'''simple docstring'''
lowercase : Tuple =self.normalize(UpperCAmelCase , '''observations''' )
lowercase : Dict =obs[None].repeat(UpperCAmelCase , axis=0 )
lowercase : str ={0: self.to_torch(UpperCAmelCase )}
lowercase : Any =(batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase : Tuple =randn_tensor(UpperCAmelCase , device=self.unet.device )
lowercase : Optional[Any] =self.reset_xa(UpperCAmelCase , UpperCAmelCase , self.action_dim )
lowercase : Dict =self.to_torch(UpperCAmelCase )
# run the diffusion process
lowercase : Tuple =self.run_diffusion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# sort output trajectories by value
lowercase : int =y.argsort(0 , descending=UpperCAmelCase ).squeeze()
lowercase : Union[str, Any] =x[sorted_idx]
lowercase : Optional[int] =sorted_values[:, :, : self.action_dim]
lowercase : str =actions.detach().cpu().numpy()
lowercase : List[Any] =self.de_normalize(UpperCAmelCase , key='''actions''' )
# select the action with the highest value
if y is not None:
lowercase : List[str] =0
else:
# if we didn't run value guiding, select a random action
lowercase : List[str] =np.random.randint(0 , UpperCAmelCase )
lowercase : List[Any] =denorm_actions[selected_index, 0]
return denorm_actions
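# A hedged usage sketch for the pipeline above, assuming the de-obfuscated name
# (ValueGuidedRLPipeline from diffusers' experimental RL module) and the
# upstream keyword names; it needs a trained UNet, a value function, a DDPM
# scheduler and a d4rl-style env exposing `get_dataset()`:
#
#     pipeline = ValueGuidedRLPipeline(value_function=vf, unet=unet,
#                                      scheduler=scheduler, env=env)
#     obs = env.reset()
#     for _ in range(100):
#         # keyword names below are assumed from the upstream signature
#         action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
#         obs, reward, done, info = env.step(action)
#         if done:
#             break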
| 718 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Any =13
lowercase : Any =7
lowercase : Optional[int] =True
lowercase : Optional[int] =True
lowercase : Tuple =False
lowercase : Optional[Any] =True
lowercase : Dict =99
lowercase : Union[str, Any] =32
lowercase : Union[str, Any] =2
lowercase : Union[str, Any] =4
lowercase : List[str] =37
lowercase : str ='''gelu'''
lowercase : Dict =0.1
lowercase : List[Any] =0.1
lowercase : List[str] =512
lowercase : Optional[int] =16
lowercase : Optional[Any] =2
lowercase : List[str] =0.0_2
lowercase : Any =3
lowercase : Optional[Any] =4
lowercase : int =None
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Any =None
lowercase : str =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : int =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : str =TFDistilBertModelTester(self )
lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[Any] =model(UpperCAmelCase )[0]
lowercase : str =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Optional[int] =tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
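# A standalone reproduction of the shape check from the integration test above
# (same public checkpoint, no test harness; downloads weights on first run):
import tensorflow as tf
from transformers import TFDistilBertModel

_model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
_output = _model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
print(_output.shape)  # (1, 6, 768)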
| 8 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ = '''CLIPImageProcessor'''
UpperCamelCase_ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase , )
lowercase : Tuple =kwargs.pop('''feature_extractor''' )
lowercase : List[str] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : int=None , UpperCAmelCase : str=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase : int =self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if images is not None:
lowercase : Union[str, Any] =self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None and images is not None:
lowercase : Optional[Any] =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def A__ ( self : Tuple , *UpperCAmelCase : Any , **UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Optional[int] , *UpperCAmelCase : Tuple , **UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =self.tokenizer.model_input_names
lowercase : List[str] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A__ ( self : Dict ) -> Tuple:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
return self.image_processor_class
@property
def A__ ( self : Any ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
return self.image_processor
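# A minimal usage sketch, assuming the de-obfuscated public API (CLIPProcessor)
# and its public checkpoint. The processor routes text to the tokenizer and
# images to the image processor, then merges the two encodings:
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # stand-in image
_inputs = _processor(text=["a photo of a cat"], images=_image, return_tensors="pt", padding=True)
print(sorted(_inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']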
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
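# The module above follows the transformers lazy-import pattern: upstream, the
# assignment target on the line above is sys.modules[__name__], so importing
# the package swaps in a _LazyModule proxy that resolves names from
# _import_structure on first attribute access. Heavy backends (torch / tf /
# sentencepiece) are only loaded when a symbol that needs them is requested.
# A quick demonstration, assuming transformers is installed:
#
#     import transformers.models.rembert as rembert
#     print(type(rembert))           # a _LazyModule proxy, not a plain module
#     cfg = rembert.RemBertConfig()  # first access triggers the real import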
| 8 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE = logging.getLogger()
def lowercase_ ( __A : Path , __A : list ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[Any] ='''\n'''.join(__A )
Path(__A ).open('''w''' ).writelines(__A )
SCREAMING_SNAKE_CASE = 'patrickvonplaten/t5-tiny-random'
SCREAMING_SNAKE_CASE = 'sshleifer/bart-tiny-random'
SCREAMING_SNAKE_CASE = 'sshleifer/tiny-mbart'
SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( __A ):
"""simple docstring"""
def A__ ( self : Optional[Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
lowercase : List[str] =Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
lowercase : Optional[int] =input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
lowercase : List[str] =[''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(UpperCAmelCase , UpperCAmelCase )
lowercase : Union[str, Any] =str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
lowercase : Tuple ='''translation_en_to_de''' if model == T5_TINY else '''summarization'''
lowercase : int =f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(UpperCAmelCase , '''argv''' , UpperCAmelCase ):
run_generate()
assert Path(UpperCAmelCase ).exists()
# os.remove(Path(output_file_name))
def A__ ( self : Dict ) -> int:
'''simple docstring'''
self.run_eval_tester(UpperCAmelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def A__ ( self : Any , UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.run_eval_tester(UpperCAmelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def A__ ( self : List[str] , UpperCAmelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
lowercase : Optional[Any] =input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
lowercase : Union[str, Any] ={
'''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
'''de''': [
'''Maschinelles Lernen ist großartig, oder?''',
'''Ich esse gerne Bananen''',
'''Morgen ist wieder ein toller Tag!''',
],
}
lowercase : List[str] =Path(self.get_auto_remove_tmp_dir() )
lowercase : Any =str(tmp_dir / '''scores.json''' )
lowercase : Union[str, Any] =str(tmp_dir / '''val.target''' )
_dump_articles(UpperCAmelCase , text['''en'''] )
_dump_articles(UpperCAmelCase , text['''de'''] )
lowercase : Tuple ='''translation_en_to_de''' if model == T5_TINY else '''summarization'''
lowercase : str =f'\n run_eval_search.py\n {model}\n {str(UpperCAmelCase )}\n {str(UpperCAmelCase )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
with patch.object(UpperCAmelCase , '''argv''' , UpperCAmelCase ):
with CaptureStdout() as cs:
run_search()
lowercase : List[Any] =[''' num_beams | length_penalty''', model, '''Best score args''']
lowercase : List[Any] =['''Info''']
if "translation" in task:
expected_strings.append('''bleu''' )
else:
expected_strings.extend(UpperCAmelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(UpperCAmelCase ).exists()
os.remove(Path(UpperCAmelCase ) )
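# A minimal sketch, assuming run_eval_search expands a grid string of the form
# "key=v1:v2 key2=w1:w2" -- like the one passed via --search above -- into the
# cartesian product of argument combinations:
from itertools import product

def _expand_search_grid(spec):
    keys, value_lists = [], []
    for term in spec.split():
        key, values = term.split("=")
        keys.append(key)
        value_lists.append(values.split(":"))
    return [dict(zip(keys, combo)) for combo in product(*value_lists)]

print(_expand_search_grid("num_beams=1:2 length_penalty=0.9:1.0"))
# -> 4 combinations, e.g. {'num_beams': '1', 'length_penalty': '0.9'}, ...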
| 720 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def lowercase_ ( __A : Union[str, Version] , __A : str , __A : str ) -> Union[str, Any]:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
lowercase : Any =STR_OPERATION_TO_FUNC[operation]
if isinstance(__A , __A ):
lowercase : List[Any] =parse(importlib.metadata.version(__A ) )
return operation(__A , parse(__A ) )
def lowercase_ ( __A : str , __A : str ) -> Tuple:
"""simple docstring"""
return compare_versions(__A , __A , __A )
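# A usage sketch for the helpers above. STR_OPERATION_TO_FUNC is assumed to map
# operator strings to functions from the `operator` module, roughly:
import operator
from packaging.version import parse as _parse

_STR_OPERATION_TO_FUNC = {
    ">": operator.gt, ">=": operator.ge, "==": operator.eq,
    "!=": operator.ne, "<=": operator.le, "<": operator.lt,
}
# e.g. compare_versions(parse("2.1.0"), ">=", "1.13") evaluates as:
print(_STR_OPERATION_TO_FUNC[">="](_parse("2.1.0"), _parse("1.13")))  # True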
| 8 | 0 |
'''simple docstring'''
import torch
from torch import nn
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : List[str]=False ) -> int:
'''simple docstring'''
super().__init__()
lowercase : Optional[Any] =n_token
lowercase : List[Any] =d_embed
lowercase : Dict =d_proj
lowercase : Dict =cutoffs + [n_token]
lowercase : List[str] =[0] + self.cutoffs
lowercase : Union[str, Any] =div_val
lowercase : int =self.cutoffs[0]
lowercase : Dict =len(self.cutoffs ) - 1
lowercase : Dict =self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowercase : Any =nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowercase : Any =nn.Parameter(torch.zeros(self.n_clusters ) )
lowercase : int =nn.ModuleList()
lowercase : str =nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCAmelCase , UpperCAmelCase ) ) )
else:
self.out_projs.append(UpperCAmelCase )
self.out_layers.append(nn.Linear(UpperCAmelCase , UpperCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
lowercase : Union[str, Any] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase : Optional[int] =d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCAmelCase , UpperCAmelCase ) ) )
self.out_layers.append(nn.Linear(UpperCAmelCase , r_idx - l_idx ) )
lowercase : Optional[Any] =keep_order
def A__ ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if proj is None:
lowercase : Any =nn.functional.linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowercase : Dict =nn.functional.linear(UpperCAmelCase , proj.t().contiguous() )
lowercase : Dict =nn.functional.linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def A__ ( self : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple=None , UpperCAmelCase : Dict=False ) -> str:
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowercase : Dict =hidden[..., :-1, :].contiguous()
lowercase : Dict =labels[..., 1:].contiguous()
lowercase : int =hidden.view(-1 , hidden.size(-1 ) )
lowercase : Optional[Any] =labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
lowercase : Tuple =hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowercase : Any =self._compute_logit(UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowercase : List[str] =labels != -100
lowercase : List[Any] =torch.zeros_like(UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
lowercase : Optional[int] =(
-nn.functional.log_softmax(UpperCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowercase : Tuple =nn.functional.log_softmax(UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
lowercase : List[str] =[], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase : Any =self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase : Optional[Any] =self.out_layers[0].weight[l_idx:r_idx]
lowercase : str =self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase : Union[str, Any] =self.out_layers[i].weight
lowercase : Dict =self.out_layers[i].bias
if i == 0:
lowercase : List[str] =torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase : List[str] =torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCAmelCase )
biases.append(UpperCAmelCase )
lowercase : Any =weights[0], biases[0], self.out_projs[0]
lowercase : Optional[int] =self._compute_logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : str =nn.functional.log_softmax(UpperCAmelCase , dim=1 )
if labels is None:
lowercase : Any =hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowercase : List[Any] =torch.zeros_like(UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
lowercase : Dict =0
lowercase : List[Any] =[0] + self.cutoffs
for i in range(len(UpperCAmelCase ) - 1 ):
lowercase : List[Any] =cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowercase : List[str] =(labels >= l_idx) & (labels < r_idx)
lowercase : Optional[Any] =mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowercase : Any =labels.index_select(0 , UpperCAmelCase ) - l_idx
lowercase : str =head_logprob.index_select(0 , UpperCAmelCase )
lowercase : int =hidden.index_select(0 , UpperCAmelCase )
else:
lowercase : Dict =hidden
if i == 0:
if labels is not None:
lowercase : Optional[int] =head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowercase : Union[str, Any] =head_logprob[:, : self.cutoffs[0]]
else:
lowercase : int =weights[i], biases[i], self.out_projs[i]
lowercase : Union[str, Any] =self._compute_logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Union[str, Any] =nn.functional.log_softmax(UpperCAmelCase , dim=1 )
lowercase : Any =self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowercase : Any =head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowercase : int =head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowercase : Union[str, Any] =logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def A__ ( self : List[Any] , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if self.n_clusters == 0:
lowercase : str =self._compute_logit(UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
lowercase : Union[str, Any] =[], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase : Union[str, Any] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase : int =self.out_layers[0].weight[l_idx:r_idx]
lowercase : int =self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase : str =self.out_layers[i].weight
lowercase : Union[str, Any] =self.out_layers[i].bias
if i == 0:
lowercase : str =torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase : Dict =torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCAmelCase )
biases.append(UpperCAmelCase )
lowercase : Optional[int] =weights[0], biases[0], self.out_projs[0]
lowercase : str =self._compute_logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : int =hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowercase : List[str] =nn.functional.log_softmax(UpperCAmelCase , dim=1 )
lowercase : str =[0] + self.cutoffs
for i in range(len(UpperCAmelCase ) - 1 ):
lowercase : Any =cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowercase : Optional[int] =head_logprob[:, : self.cutoffs[0]]
else:
lowercase : List[str] =weights[i], biases[i], self.out_projs[i]
lowercase : str =self._compute_logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Dict =nn.functional.log_softmax(UpperCAmelCase , dim=1 )
lowercase : Optional[int] =head_logprob[:, -i] + tail_logprob_i
lowercase : str =logprob_i
return out
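# The module above is Transfo-XL's projected adaptive softmax. PyTorch ships a
# closely related built-in, nn.AdaptiveLogSoftmaxWithLoss; a toy run of it
# illustrates the cutoff/cluster mechanics (tokens past each cutoff are scored
# through progressively smaller tail clusters, shrunk by div_value per cluster):
import torch
from torch import nn

_asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000, cutoffs=[100, 500], div_value=4.0)
_hidden = torch.randn(8, 64)
_labels = torch.randint(0, 1000, (8,))
_out = _asm(_hidden, _labels)  # namedtuple: per-sample target log-probs + mean NLL
print(_out.loss)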
| 721 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( __A : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
lowercase : List[Any] =BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
lowercase : List[str] =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
lowercase : Union[str, Any] =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCamelCase : Optional[Any] = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowerCamelCase__ = cached_file(__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models )
lowerCamelCase__ = config_class.from_json_file(__lowerCAmelCase )
lowerCamelCase__ = True
lowerCamelCase__ = True
print(F'''Building TensorFlow model from configuration: {config}''' )
lowerCamelCase__ = model_class(__lowerCAmelCase )
# Load weights from the PyTorch checkpoint (resolving AWS shortcut names first)
if pytorch_checkpoint_path in aws_config_map.keys():
lowerCamelCase__ = cached_file(
__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowerCamelCase__ = load_pytorch_checkpoint_in_tfa_model(__lowerCAmelCase , __lowerCAmelCase )
if compare_with_pt_model:
lowerCamelCase__ = tf_model(tf_model.dummy_inputs , training=__lowerCAmelCase ) # build the network
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
lowerCamelCase__ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__lowerCAmelCase , config=__lowerCAmelCase , state_dict=__lowerCAmelCase )
with torch.no_grad():
lowerCamelCase__ = pt_model(**pt_model.dummy_inputs )
lowerCamelCase__ = pto[0].numpy()
lowerCamelCase__ = tfo[0].numpy()
lowerCamelCase__ = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(__lowerCAmelCase , save_format="""h5""" )
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Any=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[Any]=False , ):
if args_model_type is None:
lowerCamelCase__ = list(MODEL_CLASSES.keys() )
else:
lowerCamelCase__ = [args_model_type]
for j, model_type in enumerate(__lowerCAmelCase , start=1 ):
print("""=""" * 100 )
print(F''' Converting model type {j}/{len(__lowerCAmelCase )}: {model_type}''' )
print("""=""" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowerCamelCase__ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowerCamelCase__ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__lowerCAmelCase , __lowerCAmelCase ) , start=1 ):
print("""-""" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowerCamelCase__ = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(__lowerCAmelCase )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 100 )
if config_shortcut_name in aws_config_map:
lowerCamelCase__ = cached_file(__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models )
else:
lowerCamelCase__ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowerCamelCase__ = cached_file(__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models )
else:
lowerCamelCase__ = model_shortcut_name
if os.path.isfile(__lowerCAmelCase ):
lowerCamelCase__ = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=__lowerCAmelCase , pytorch_checkpoint_path=__lowerCAmelCase , config_file=__lowerCAmelCase , tf_dump_path=os.path.join(__lowerCAmelCase , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__lowerCAmelCase , )
if remove_cached_files:
os.remove(__lowerCAmelCase )
os.remove(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
UpperCamelCase : str = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
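# A hedged shell-usage sketch for the script above (the file name
# convert_pytorch_checkpoint_to_tf2.py is assumed; flags are taken from the
# argparse definitions): convert one BERT checkpoint and verify numerical
# parity with the PyTorch outputs.
#
#     python convert_pytorch_checkpoint_to_tf2.py \
#         --model_type bert \
#         --pytorch_checkpoint_path bert-base-uncased \
#         --tf_dump_path ./tf_dumps \
#         --compare_with_pt_model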
| 9 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,):
lowerCamelCase__ = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
lowerCamelCase__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowerCamelCase__ = token_dict["""token"""]
lowerCamelCase__ = Tokenizer(Unigram() )
lowerCamelCase__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ),
normalizers.Lowercase(),
] )
lowerCamelCase__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ),
pre_tokenizers.Punctuation(),
] )
lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,)
lowerCamelCase__ = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,)
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [files]
self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase )
self.add_unk_id()
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,)
self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase )
self.add_unk_id()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = json.loads(self._tokenizer.to_str() )
lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""]
lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
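# A hedged usage sketch for the class above, assuming the de-obfuscated names
# (SentencePieceUnigramTokenizer with train / train_from_iterator / add_unk_id)
# from the upstream example this cell derives from; toy corpus only --
# realistic vocab sizes need real training data:
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train_from_iterator(["hello world", "unigram language model"], vocab_size=100)
#     print(tokenizer.encode("hello world").tokens)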
| 9 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
super().__init__()
self.register_modules(
vae=_lowerCAmelCase ,text_encoder=_lowerCAmelCase ,tokenizer=_lowerCAmelCase ,unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,safety_checker=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
self.enable_attention_slicing(_lowerCAmelCase )
@torch.no_grad()
def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 50 ,_lowerCAmelCase = 7.5 ,_lowerCAmelCase = None ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 0.0 ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = "pil" ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = 1 ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = 1
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = len(_lowerCAmelCase )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_lowerCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCAmelCase ,_lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(_lowerCAmelCase )}.''' )
# get prompt text embeddings
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,return_tensors="""pt""" ,)
lowerCamelCase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = text_embeddings.shape
lowerCamelCase__ = text_embeddings.repeat(1 ,_lowerCAmelCase ,1 )
lowerCamelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt ,_lowerCAmelCase ,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase__ = 42
if negative_prompt is None:
lowerCamelCase__ = [""""""]
elif type(_lowerCAmelCase ) is not type(_lowerCAmelCase ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(_lowerCAmelCase )} !='''
F''' {type(_lowerCAmelCase )}.''' )
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [negative_prompt]
elif batch_size != len(_lowerCAmelCase ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(_lowerCAmelCase )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
lowerCamelCase__ = negative_prompt
lowerCamelCase__ = text_input_ids.shape[-1]
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,max_length=_lowerCAmelCase ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
lowerCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase__ = uncond_embeddings.shape[1]
lowerCamelCase__ = uncond_embeddings.repeat(_lowerCAmelCase ,_lowerCAmelCase ,1 )
lowerCamelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt ,_lowerCAmelCase ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase__ = torch.randn(
_lowerCAmelCase ,generator=_lowerCAmelCase ,device="""cpu""" ,dtype=_lowerCAmelCase ).to(self.device )
lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device="""cpu""" ,dtype=_lowerCAmelCase ).to(
self.device )
else:
lowerCamelCase__ = torch.randn(
_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ,dtype=_lowerCAmelCase )
lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ,dtype=_lowerCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase__ = latents_reference.to(self.device )
lowerCamelCase__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase__ = 0 if dx < 0 else dx
lowerCamelCase__ = 0 if dy < 0 else dy
lowerCamelCase__ = max(-dx ,0 )
lowerCamelCase__ = max(-dy ,0 )
# import pdb
# pdb.set_trace()
lowerCamelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase__ = {}
if accepts_eta:
lowerCamelCase__ = eta
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
# predict the noise residual
lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ = self.scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 1 / 0.1_8215 * latents
lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase__ = self.feature_extractor(self.numpy_to_pil(_lowerCAmelCase ) ,return_tensors="""pt""" ).to(
self.device )
lowerCamelCase__ , lowerCamelCase__ = self.safety_checker(
images=_lowerCAmelCase ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase__ = None
if output_type == "pil":
lowerCamelCase__ = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase ,nsfw_content_detected=_lowerCAmelCase )
| 9 |
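# The guidance arithmetic inside the denoising loop above reduces to one reusable
# step. A minimal sketch, assuming a diffusers-style UNet whose forward returns an
# object with a `.sample` attribute (the function name is mine):
import torch

def classifier_free_guidance_step(unet, latents, t, text_emb, uncond_emb, guidance_scale=7.5):
    # one forward pass over the concatenated [uncond, cond] batch instead of two
    latent_model_input = torch.cat([latents, latents])
    encoder_hidden_states = torch.cat([uncond_emb, text_emb])
    noise_pred = unet(latent_model_input, t, encoder_hidden_states=encoder_hidden_states).sample
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)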
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 1 |
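# Independent cross-check of the solution above: brute-force every odd composite
# below a small limit and collect the ones that violate Goldbach's other conjecture.
# Reuses the is_prime defined above; the 6000 bound is an assumption chosen to
# cover the first counterexamples.
def conjecture_violations(limit: int = 6000) -> list[int]:
    return [
        n
        for n in range(9, limit, 2)
        if not is_prime(n)
        and not any(is_prime(n - 2 * k * k) for k in range(1, int((n // 2) ** 0.5) + 1))
    ]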
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # TensorProtos compare equal iff all fields match, so blank the names first
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT: 4 bytes per element
                    mem_size *= 4
                elif dtype == 6:  # INT32: 4 bytes per element
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE: 8 bytes per element
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = os.path.join(model_file_folder, "optimized_" + model_file_name)
    onnx.save(model, new_model)
    return new_model
| 9 |
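# Quick usage sketch for the helpers above: two initializers with identical data but
# different names compare equal once the names are blanked (toy tensors, assumed):
import numpy as np
from onnx import numpy_helper

w1 = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w1")
w2 = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w2")
assert _is_equal_tensor_proto(w1, w2)  # True: same payload, names ignored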
'''simple docstring'''
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 1 |
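# The brute force above is O(n^2); Euclid's parameterization reaches the same
# triplet much faster. A sketch (function name and the k-scaling loop are mine,
# not from the snippet above):
from typing import Optional, Tuple

def triplet_with_sum(total: int = 1000) -> Optional[Tuple[int, int, int]]:
    # a = m^2 - n^2, b = 2mn, c = m^2 + n^2 has perimeter 2m(m + n); scale by k
    for m in range(2, int(total**0.5) + 1):
        for n in range(1, m):
            perimeter = 2 * m * (m + n)
            if total % perimeter == 0:
                k = total // perimeter
                return k * (m * m - n * n), k * 2 * m * n, k * (m * m + n * n)
    return None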
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=99 ,_lowerCAmelCase=64 ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=16 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=4 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = embedding_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_choices )
lowerCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,token_type_ids=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForNextSentencePrediction(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForPreTraining(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase ,next_sentence_label=_lowerCAmelCase ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,start_positions=_lowerCAmelCase ,end_positions=_lowerCAmelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MegatronBertForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MegatronBertForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = MegatronBertForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = True
# test_resize_embeddings = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=False ):
lowerCamelCase__ = super()._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
lowerCamelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=_lowerCAmelCase )
lowerCamelCase__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_lowerCAmelCase )
return inputs_dict
def UpperCamelCase_ ( self ):
lowerCamelCase__ = MegatronBertModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowerCAmelCase )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
UpperCamelCase : Dict = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("""Model is not available.""" )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
lowerCamelCase__ = os.path.join(os.environ["""MYDIR"""] ,_lowerCAmelCase )
lowerCamelCase__ = MegatronBertModel.from_pretrained(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.half()
lowerCamelCase__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
lowerCamelCase__ = model(_lowerCAmelCase )[0]
lowerCamelCase__ = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape ,_lowerCAmelCase )
lowerCamelCase__ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCamelCase__ = output[0, ii, jj]
lowerCamelCase__ = expected[3 * ii + jj]
lowerCamelCase__ = """ii={} jj={} a={} b={}""".format(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
self.assertTrue(math.isclose(_lowerCAmelCase ,_lowerCAmelCase ,rel_tol=_lowerCAmelCase ,abs_tol=_lowerCAmelCase ) ,msg=_lowerCAmelCase )
| 9 |
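# The tests above lean on transformers' ids_tensor/random_attention_mask helpers.
# A minimal sketch of what they produce (simplified; the real helpers also accept
# an rng and a name argument):
import torch

def ids_tensor(shape, vocab_size):
    # random token ids in [0, vocab_size)
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask(shape):
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, -1] = 1  # make sure at least one token is attended to per row
    return mask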
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,)
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
lowerCamelCase__ = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>).
lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
lowerCamelCase__ = len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = []
lowerCamelCase__ = """"""
lowerCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(_lowerCAmelCase )
lowerCamelCase__ = False
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def __getstate__( self ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self ,_lowerCAmelCase ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ = os.path.join(
_lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase ,"""wb""" ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
| 9 | 1 |
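# The id arithmetic above is easy to get wrong; here is a toy sketch of the offset
# logic in isolation (values mirror the fairseq table above; the function names
# are mine):
fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4

def sp_id_to_model_id(sp_piece_id: int) -> int:
    # sentencepiece reserves id 0 for its own <unk>; fold it into the fairseq <unk>
    return fairseq_tokens_to_ids["<unk>"] if sp_piece_id == 0 else sp_piece_id + fairseq_offset

def model_id_to_sp_id(model_id: int) -> int:
    return model_id - fairseq_offset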
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 9 |
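# A minimal sketch of the lazy-import pattern the init above relies on (a
# simplified stand-in, not transformers' actual _LazyModule):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the submodule is only imported on first attribute access
        submodule = importlib.import_module("." + module_name, self.__name__)
        return getattr(submodule, attr)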
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 9 | 1 |
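# The core layout rule in the converter above, isolated: PyTorch Linear weights are
# (out_features, in_features) while Flax Dense kernels are (in, out), and NCHW conv
# weights become HWIO. A tiny sketch (function name is mine):
import numpy as np
import jax.numpy as jnp

def pt_weight_to_flax_kernel(weight: np.ndarray) -> jnp.ndarray:
    if weight.ndim == 4:  # conv: (O, I, H, W) -> (H, W, I, O)
        return jnp.asarray(weight.transpose(2, 3, 1, 0))
    return jnp.asarray(weight.T)  # linear: (O, I) -> (I, O)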
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 9 |
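# Sketch of the is_torch_available() guard pattern that drives inits like the one
# above (simplified; the real transformers check also inspects installed versions):
import importlib.util

def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None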
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ):
lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sgugger/tiny-distilbert-classification"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_lowerCAmelCase ):
self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
| 9 | 1 |
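# The benchmark assertions above only check that results exist; the measurement
# itself boils down to repeated timed calls. A minimal sketch with the stdlib
# (not the TensorFlowBenchmark internals):
import timeit

def measure_inference_time(fn, repeat: int = 3, number: int = 10) -> float:
    # best-of-repeat average, the usual way to reduce scheduling noise
    return min(timeit.repeat(fn, repeat=repeat, number=number)) / number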
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase__ (a ):
'''simple docstring'''
    unet: UNet2DModel
    scheduler: KarrasVeScheduler
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ):
super().__init__()
self.register_modules(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase )
@torch.no_grad()
def __call__( self ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 50 ,_lowerCAmelCase = None ,_lowerCAmelCase = "pil" ,_lowerCAmelCase = True ,**_lowerCAmelCase ,):
lowerCamelCase__ = self.unet.config.sample_size
lowerCamelCase__ = (batch_size, 3, img_size, img_size)
lowerCamelCase__ = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
lowerCamelCase__ = randn_tensor(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
lowerCamelCase__ = self.scheduler.schedule[t]
lowerCamelCase__ = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
lowerCamelCase__ , lowerCamelCase__ = self.scheduler.add_noise_to_input(_lowerCAmelCase ,_lowerCAmelCase ,generator=_lowerCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
lowerCamelCase__ = (sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
lowerCamelCase__ = self.scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
lowerCamelCase__ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2 ).sample
lowerCamelCase__ = self.scheduler.step_correct(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,step_output.prev_sample ,step_output["""derivative"""] ,)
lowerCamelCase__ = step_output.prev_sample
lowerCamelCase__ = (sample / 2 + 0.5).clamp(0 ,1 )
lowerCamelCase__ = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowerCamelCase__ = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
| 9 |
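# The scheduler consumed above follows the noise schedule of Karras et al. (2022):
# sigma_i = (sigma_max^(1/rho) + i/(N-1) * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho.
# A sketch with assumed default values:
import numpy as np

def karras_sigmas(n: int, sigma_min: float = 0.002, sigma_max: float = 80.0, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho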
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
| 9 | 1 |
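# Two well-known fixed points of the map above, usable as quick sanity checks:
# 145 = 1! + 4! + 5! maps to itself, and 169 -> 363601 -> 1454 -> 169 loops with
# length three.
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363_601
assert digit_factorial_sum(363_601) == 1454
assert digit_factorial_sum(1454) == 169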
'''simple docstring'''
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
UpperCamelCase : List[Any] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 9 |
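# The rollout above follows the old gym API (4-tuple step). For comparison, a
# stripped-down loop with a random policy (the env name is an assumption and,
# like above, requires d4rl to be importable):
import gym

def random_rollout(env_name: str = "hopper-medium-v2", max_steps: int = 1000) -> float:
    env = gym.make(env_name)
    obs = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        obs, reward, done, info = env.step(env.action_space.sample())
        total_reward += reward
        if done:
            break
    return total_reward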
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCamelCase : Optional[Any] = 'src/diffusers'
# Matches is_xxx_available()
UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
UpperCamelCase : Optional[int] = '\n{0} = None\n'
UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def A__ ( __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase )
if len(__lowerCAmelCase ) == 0:
return None
return "_and_".join(__lowerCAmelCase )
def A__ ( ):
with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase__ = 0
lowerCamelCase__ = {}
# Go through the end of the file
while line_index < len(__lowerCAmelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
lowerCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1:
lowerCamelCase__ = lines[line_index]
lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__lowerCAmelCase ) > 0:
lowerCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ):
if name.isupper():
return DUMMY_CONSTANT.format(__lowerCAmelCase )
elif name.islower():
return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase )
else:
return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : Optional[int]=None ):
if backend_specific_objects is None:
lowerCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] )
lowerCamelCase__ = dummy_file
return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase : Any = parser.parse_args()
check_dummies(args.fix_and_overwrite)
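# Typical usage, assuming the script lives at utils/check_dummies.py:
#   python utils/check_dummies.py                      # raise if the dummy files are stale
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate them (what `make fix-copies` invokes)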
| 9 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' ,'False' ) ) is not True ,reason='Skipping test because should only be run when releasing minor transformers version' ,)
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 9 |
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
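    # Byte fallback explains the expected tokens above: "é" has no dedicated piece in this
    # small test vocab, so SentencePiece emits its UTF-8 bytes as <0xC3> <0xA9>; the digit
    # "9" in "92000" likewise falls back to <0x39>.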
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {
            "input_ids": [
                [63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994],
                [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
            "token_type_ids": [[0] * 26, [0] * 26, [0] * 26, [0] * 26, [0] * 26],
            "attention_mask": [
                [1] * 26,
                [1] * 11 + [0] * 15,
                [1] * 14 + [0] * 12,
                [1] * 10 + [0] * 16,
                [1] * 8 + [0] * 18,
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 9 | 1 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 9 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    '''simple docstring'''

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key), Write(key_text))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)

        self.wait()
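# To render the scene above (class name as restored here), something like
# `manim -pql this_file.py Stage1` should work with community manim; the exact CLI
# flags depend on the installed manim version.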
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 9 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
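# Worked example of one chain: 44 -> 4**2 + 4**2 = 32 -> 9 + 4 = 13 -> 1 + 9 = 10 -> 1,
# so chain(44) is True (it arrives at 1); 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37
# -> 58 -> 89 loops back to 89, so chain(85) is False.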
def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 9 | 1 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
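# Note: the zip-based mapping above assumes the diffuser checkpoint and the fresh
# UNet1DModel enumerate their parameters in the same order; keys are renamed by
# position, not matched by name.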
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 9 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'donut-swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
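# With the defaults above (embed_dim=96, len(depths) == 4), the derived hidden_size is
# 96 * 2 ** 3 = 768, the channel width after the last Swin stage.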
| 9 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 9 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']

OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
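# The conversion is a single key rename: DialoGPT checkpoints store the tied output
# projection under "lm_head.decoder.weight", while transformers' GPT-2 head expects
# "lm_head.weight".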
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl')
        pytorch_dump_folder_path = f'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 9 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 9 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
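        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, and
        # ceil(0.4 * 226) = 91 visible tokens per sequence after 60% masking.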
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 9 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
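    # Assigning the lazy module into sys.modules means the torch-dependent submodules
    # above are only imported the first time one of these names is actually accessed.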
| 9 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 9 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase : Tuple = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=8 ):
lowerCamelCase__ = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowerCamelCase__ = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
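# Illustrative note (not part of the original file): the helper above maps pixel
# dimensions to latent dimensions, rounding up so the result stays a multiple of
# scale_factor. Assuming the default scale_factor of 8: 768 px -> 96 latents
# (768 // 64 * 8), while 700 px -> 88 latents (ceil(700 / 64) * 8), which the
# movq decoder turns back into 704 px.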
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
super().__init__()
self.register_modules(
text_encoder=_lowerCAmelCase ,tokenizer=_lowerCAmelCase ,unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,movq=_lowerCAmelCase ,)
lowerCamelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
if latents is None:
lowerCamelCase__ = randn_tensor(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=_lowerCAmelCase ,dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowerCamelCase__ = latents.to(_lowerCAmelCase )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,):
lowerCamelCase__ = len(_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else 1
# get prompt text embeddings
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,truncation=_lowerCAmelCase ,max_length=77 ,return_attention_mask=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,return_tensors="""pt""" ,)
lowerCamelCase__ = text_inputs.input_ids
lowerCamelCase__ = self.tokenizer(_lowerCAmelCase ,padding="""longest""" ,return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase__ = text_input_ids.to(_lowerCAmelCase )
lowerCamelCase__ = text_inputs.attention_mask.to(_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = self.text_encoder(
input_ids=_lowerCAmelCase ,attention_mask=_lowerCAmelCase )
lowerCamelCase__ = prompt_embeds.repeat_interleave(_lowerCAmelCase ,dim=0 )
lowerCamelCase__ = text_encoder_hidden_states.repeat_interleave(_lowerCAmelCase ,dim=0 )
lowerCamelCase__ = text_mask.repeat_interleave(_lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = 42
if negative_prompt is None:
lowerCamelCase__ = [""""""] * batch_size
elif type(_lowerCAmelCase ) is not type(_lowerCAmelCase ):
raise TypeError(
                    F'''`negative_prompt` should be the same type as `prompt`, but got {type(_lowerCAmelCase )} !='''
F''' {type(_lowerCAmelCase )}.''' )
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [negative_prompt]
elif batch_size != len(_lowerCAmelCase ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(_lowerCAmelCase )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
lowerCamelCase__ = negative_prompt
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,max_length=77 ,truncation=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,return_tensors="""pt""" ,)
lowerCamelCase__ = uncond_input.input_ids.to(_lowerCAmelCase )
lowerCamelCase__ = uncond_input.attention_mask.to(_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = self.text_encoder(
input_ids=_lowerCAmelCase ,attention_mask=_lowerCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase__ = negative_prompt_embeds.shape[1]
lowerCamelCase__ = negative_prompt_embeds.repeat(1 ,_lowerCAmelCase )
lowerCamelCase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,_lowerCAmelCase )
lowerCamelCase__ = uncond_text_encoder_hidden_states.shape[1]
lowerCamelCase__ = uncond_text_encoder_hidden_states.repeat(1 ,_lowerCAmelCase ,1 )
lowerCamelCase__ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,_lowerCAmelCase ,-1 )
lowerCamelCase__ = uncond_text_mask.repeat_interleave(_lowerCAmelCase ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowerCamelCase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowerCamelCase__ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def UpperCamelCase_ ( self ,_lowerCAmelCase=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCamelCase__ = torch.device(F'''cuda:{gpu_id}''' )
lowerCamelCase__ = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCamelCase__ = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" ,silence_dtype_warnings=_lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase__ = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowerCamelCase__ , lowerCamelCase__ = cpu_offload_with_hook(_lowerCAmelCase ,_lowerCAmelCase ,prev_module_hook=_lowerCAmelCase )
if self.safety_checker is not None:
lowerCamelCase__ , lowerCamelCase__ = cpu_offload_with_hook(self.safety_checker ,_lowerCAmelCase ,prev_module_hook=_lowerCAmelCase )
# We'll offload the last model manually.
lowerCamelCase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase_ ( self ):
if not hasattr(self.unet ,"""_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase ,"""_hf_hook""" )
and hasattr(module._hf_hook ,"""execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 1_00 ,_lowerCAmelCase = 4.0 ,_lowerCAmelCase = 1 ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = "pil" ,_lowerCAmelCase = True ,):
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = 1
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = len(_lowerCAmelCase )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_lowerCAmelCase )}''' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._encode_prompt(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = torch.cat(_lowerCAmelCase ,dim=0 )
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = torch.cat(_lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = image_embeds.repeat_interleave(_lowerCAmelCase ,dim=0 )
lowerCamelCase__ = negative_image_embeds.repeat_interleave(_lowerCAmelCase ,dim=0 )
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=_lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase ,device=_lowerCAmelCase )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.unet.config.in_channels
lowerCamelCase__ , lowerCamelCase__ = get_new_h_w(_lowerCAmelCase ,_lowerCAmelCase ,self.movq_scale_factor )
# create initial latent
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
lowerCamelCase__ = self.unet(
sample=_lowerCAmelCase ,timestep=_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ,added_cond_kwargs=_lowerCAmelCase ,return_dict=_lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(latents.shape[1] ,dim=1 )
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ , lowerCamelCase__ = variance_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
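                # standard classifier-free guidance: move the prediction away from
                # the unconditional branch by the guidance scale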
lowerCamelCase__ = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"""variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ = self.scheduler.step(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,generator=_lowerCAmelCase ,).prev_sample
# post-processing
lowerCamelCase__ = self.movq.decode(_lowerCAmelCase ,force_not_quantize=_lowerCAmelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowerCamelCase__ = image * 0.5 + 0.5
lowerCamelCase__ = image.clamp(0 ,1 )
lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
lowerCamelCase__ = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
| 9 |
'''simple docstring'''
import numpy
# List of input, output pairs
UpperCamelCase : List[Any] = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
UpperCamelCase : int = [2, 4, 1, 5]
UpperCamelCase : int = len(train_data)
UpperCamelCase : Dict = 0.009
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ):
return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output(
__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : Any ):
lowerCamelCase__ = 0
for i in range(len(__lowerCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ):
lowerCamelCase__ = 0
for i in range(__lowerCAmelCase ):
if index == -1:
summation_value += _error(__lowerCAmelCase )
else:
summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index]
return summation_value
def A__ ( __lowerCAmelCase : List[Any] ):
lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m
return cost_derivative_value
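# Illustrative note (not in the original): for the squared-error cost
# J(theta) = (1 / 2m) * sum((h(x) - y) ** 2), the derivative computed above is
# dJ/dtheta_i = (1 / m) * sum((h(x) - y) * x_i), with x_0 treated as the constant 1
# for the bias term (the index == -1 branch in the summation helper).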
def A__ ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase__ = 0.00_0002
lowerCamelCase__ = 0
lowerCamelCase__ = 0
while True:
j += 1
lowerCamelCase__ = [0, 0, 0, 0]
for i in range(0 , len(__lowerCAmelCase ) ):
lowerCamelCase__ = get_cost_derivative(i - 1 )
lowerCamelCase__ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ):
break
lowerCamelCase__ = temp_parameter_vector
print(("""Number of iterations:""", j) )
def A__ ( ):
for i in range(len(__lowerCAmelCase ) ):
print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) )
print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 9 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
UpperCamelCase : List[Any] = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
UpperCamelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
UpperCamelCase : Tuple = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCamelCase__ (datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" ,id="""sequence""" ) ,id="""references""" ),
} ) ,codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] ,reference_urls=[
"""https://github.com/m-popovic/chrF""",
] ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = CHRF.CHAR_ORDER ,_lowerCAmelCase = CHRF.WORD_ORDER ,_lowerCAmelCase = CHRF.BETA ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,):
lowerCamelCase__ = len(references[0] )
if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowerCamelCase__ = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
lowerCamelCase__ = CHRF(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = sb_chrf.corpus_score(_lowerCAmelCase ,_lowerCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 9 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ):
lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase )
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""]
lowerCamelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCamelCase__ = {}
lowerCamelCase__ = """first_stage_model."""
for key in keys:
if key.startswith(__lowerCAmelCase ):
lowerCamelCase__ = state_dict[key]
# extract state_dict for UNetLDM
lowerCamelCase__ = {}
lowerCamelCase__ = """model.diffusion_model."""
for key in keys:
if key.startswith(__lowerCAmelCase ):
lowerCamelCase__ = state_dict[key]
lowerCamelCase__ = config.model.params.first_stage_config.params
lowerCamelCase__ = config.model.params.unet_config.params
lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval()
vqvae.load_state_dict(__lowerCAmelCase )
lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval()
unet.load_state_dict(__lowerCAmelCase )
lowerCamelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , )
lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipeline.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
UpperCamelCase : List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 9 | 1 |
'''simple docstring'''
from datetime import datetime
import requests
def A__ ( __lowerCAmelCase : str ):
lowerCamelCase__ = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
lowerCamelCase__ = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(__lowerCAmelCase ).content
if __name__ == "__main__":
UpperCamelCase : str = input('Enter Video/IGTV url: ').strip()
UpperCamelCase : str = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 9 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ):
lowerCamelCase__ = ""
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ):
lowerCamelCase__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__lowerCAmelCase )
return decoded
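# Illustrative note (not part of the original): XOR with the same key is its own
# inverse, e.g. ord("h") ^ ord("x") ^ ord("x") == ord("h"), so cycling a candidate
# 3-letter lowercase key over the ciphertext recovers the plaintext exactly when
# the key is correct; any byte outside VALID_CHARS rules the candidate out early.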
def A__ ( __lowerCAmelCase : list[int] ):
lowerCamelCase__ = []
for key in product(__lowerCAmelCase , repeat=3 ):
lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase )
if encoded is not None:
possibles.append(__lowerCAmelCase )
return possibles
def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ):
return [possible for possible in possibles if common_word in possible.lower()]
def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" )
lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )]
lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase )
for common_word in COMMON_WORDS:
lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase )
if len(__lowerCAmelCase ) == 1:
break
lowerCamelCase__ = possibles[0]
return sum(ord(__lowerCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 1 |
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ):
lowerCamelCase__ = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert("""RGB""" )
lowerCamelCase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
lowerCamelCase__ = transform(__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
return image
def A__ ( __lowerCAmelCase : Tuple ):
if "visual_encoder" in key:
lowerCamelCase__ = re.sub("""visual_encoder*""" , """vision_model.encoder""" , __lowerCAmelCase )
if "blocks" in key:
lowerCamelCase__ = re.sub(R"""blocks""" , """layers""" , __lowerCAmelCase )
if "attn" in key:
lowerCamelCase__ = re.sub(R"""attn""" , """self_attn""" , __lowerCAmelCase )
if "norm1" in key:
lowerCamelCase__ = re.sub(R"""norm1""" , """layer_norm1""" , __lowerCAmelCase )
if "norm2" in key:
lowerCamelCase__ = re.sub(R"""norm2""" , """layer_norm2""" , __lowerCAmelCase )
if "encoder.norm" in key:
lowerCamelCase__ = re.sub(R"""encoder.norm""" , """post_layernorm""" , __lowerCAmelCase )
if "encoder.patch_embed.proj" in key:
lowerCamelCase__ = re.sub(R"""encoder.patch_embed.proj""" , """embeddings.patch_embedding""" , __lowerCAmelCase )
if "encoder.pos_embed" in key:
lowerCamelCase__ = re.sub(R"""encoder.pos_embed""" , """embeddings.position_embedding""" , __lowerCAmelCase )
if "encoder.cls_token" in key:
lowerCamelCase__ = re.sub(R"""encoder.cls_token""" , """embeddings.class_embedding""" , __lowerCAmelCase )
if "self_attn" in key:
lowerCamelCase__ = re.sub(R"""self_attn.proj""" , """self_attn.projection""" , __lowerCAmelCase )
return key
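# Illustrative example (not from the original): the renames above map a BLIP-style
# key such as "visual_encoder.blocks.0.attn.qkv.weight" to the Transformers name
# "vision_model.encoder.layers.0.self_attn.qkv.weight".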
@torch.no_grad()
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=None ):
if config_path is not None:
lowerCamelCase__ = BlipConfig.from_pretrained(__lowerCAmelCase )
else:
lowerCamelCase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowerCamelCase__ = BlipForConditionalGeneration(__lowerCAmelCase ).eval()
lowerCamelCase__ = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
lowerCamelCase__ = blip_decoder(pretrained=__lowerCAmelCase , image_size=384 , vit="""base""" )
lowerCamelCase__ = pt_model.eval()
lowerCamelCase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowerCamelCase__ = modified_state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = rename_key(__lowerCAmelCase )
lowerCamelCase__ = value
hf_model.load_state_dict(__lowerCAmelCase )
lowerCamelCase__ = 384
lowerCamelCase__ = load_demo_image(image_size=__lowerCAmelCase , device="""cpu""" )
lowerCamelCase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowerCamelCase__ = tokenizer(["""a picture of"""] ).input_ids
lowerCamelCase__ = hf_model.generate(__lowerCAmelCase , __lowerCAmelCase )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowerCamelCase__ = hf_model.generate(__lowerCAmelCase )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__lowerCAmelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowerCamelCase__ = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
lowerCamelCase__ = blip_vqa(pretrained=__lowerCAmelCase , image_size=__lowerCAmelCase , vit="""base""" )
vqa_model.eval()
lowerCamelCase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowerCamelCase__ = modified_state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = rename_key(__lowerCAmelCase )
lowerCamelCase__ = value
lowerCamelCase__ = BlipForQuestionAnswering(__lowerCAmelCase )
hf_vqa_model.load_state_dict(__lowerCAmelCase )
lowerCamelCase__ = ["""How many dogs are in this image?"""]
lowerCamelCase__ = tokenizer(__lowerCAmelCase , return_tensors="""pt""" ).input_ids
lowerCamelCase__ = hf_vqa_model.generate(__lowerCAmelCase , __lowerCAmelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
lowerCamelCase__ = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
lowerCamelCase__ = blip_itm(pretrained=__lowerCAmelCase , image_size=__lowerCAmelCase , vit="""base""" )
itm_model.eval()
lowerCamelCase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowerCamelCase__ = modified_state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = rename_key(__lowerCAmelCase )
lowerCamelCase__ = value
lowerCamelCase__ = BlipForImageTextRetrieval(__lowerCAmelCase )
lowerCamelCase__ = ["""A picture of a woman with a dog sitting in a beach"""]
lowerCamelCase__ = tokenizer(
__lowerCAmelCase , return_tensors="""pt""" , padding="""max_length""" , truncation=__lowerCAmelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(__lowerCAmelCase )
hf_itm_model.eval()
lowerCamelCase__ = hf_itm_model(__lowerCAmelCase , __lowerCAmelCase , use_itm_head=__lowerCAmelCase )
lowerCamelCase__ = hf_itm_model(__lowerCAmelCase , __lowerCAmelCase , use_itm_head=__lowerCAmelCase )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCamelCase : int = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 9 |
'''simple docstring'''
import argparse
import struct
import unittest
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ):
lowerCamelCase__ = data
# Initialize hash values
lowerCamelCase__ = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
lowerCamelCase__ = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
lowerCamelCase__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCamelCase_ ( _lowerCAmelCase ):
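        # standard SHA-256 padding: append the 0x80 marker byte, zero-fill until the
        # total length is 56 mod 64 bytes, then append the original length in bits
        # as a big-endian 64-bit integer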
lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64))
lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) )
return data + padding + big_endian_integer
def UpperCamelCase_ ( self ):
# Convert into blocks of 64 bytes
lowerCamelCase__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) )
            # extend with 48 zero-initialized words for the rest of the message schedule
words += [0] * 48
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowerCamelCase__ = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
lowerCamelCase__ = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
lowerCamelCase__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 )
lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
lowerCamelCase__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 )
lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c)
lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
lowerCamelCase__ = [a, b, c, d, e, f, g, h]
# Modify final values
lowerCamelCase__ = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
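# Note (illustrative): ror above is a 32-bit right-rotation, e.g. ror(1, 1) == 0x80000000.
# Combined with the shift/rotate mixes in final_hash it implements the standard
# FIPS 180-4 SHA-256 round functions.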
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
import hashlib
lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" )
self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() )
def A__ ( ):
import doctest
doctest.testmod()
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
lowerCamelCase__ = f.read()
else:
lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" )
print(SHAaaa(__lowerCAmelCase ).hash )
if __name__ == "__main__":
main()
| 9 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def A__ ( __lowerCAmelCase : Dict ):
lowerCamelCase__ = FileLock(str(tmpdir / """foo.lock""" ) )
lowerCamelCase__ = FileLock(str(tmpdir / """foo.lock""" ) )
lowerCamelCase__ = 0.01
with locka.acquire():
with pytest.raises(__lowerCAmelCase ):
lowerCamelCase__ = time.time()
locka.acquire(__lowerCAmelCase )
assert time.time() - _start > timeout
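        # acquiring an already-held lock must block for at least `timeout` seconds
        # before Timeout is raised, which the elapsed-time assertion above verifies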
def A__ ( __lowerCAmelCase : List[str] ):
lowerCamelCase__ = """a""" * 1000 + """.lock"""
lowerCamelCase__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(__lowerCAmelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 255
lowerCamelCase__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__lowerCAmelCase ):
locka.acquire(0 )
| 9 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def A__ ( __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : Tuple ):
lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape
lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
lowerCamelCase__ = emb.weight.data
return lin_layer
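# Note (illustrative): the helper above ties the output projection to the shared
# token-embedding matrix (a bias-free Linear initialized from emb.weight), matching
# how M2M-100 shares its input and output embeddings.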
def A__ ( __lowerCAmelCase : Dict ):
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
lowerCamelCase__ = mam_aaa["""model"""]
remove_ignore_keys_(__lowerCAmelCase )
lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
lowerCamelCase__ = MaMaaaConfig(
vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""]
lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase )
model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
lowerCamelCase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase : Tuple = parser.parse_args()
UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 9 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ (a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = KandinskyInpaintPipeline
_UpperCamelCase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_UpperCamelCase = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_UpperCamelCase = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCamelCase = False
@property
def UpperCamelCase_ ( self ):
return 32
@property
def UpperCamelCase_ ( self ):
return 32
@property
def UpperCamelCase_ ( self ):
return self.time_input_dim
@property
def UpperCamelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self ):
return 1_00
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowerCamelCase__ = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=10_05 ,)
lowerCamelCase__ = MultilingualCLIP(_lowerCAmelCase )
lowerCamelCase__ = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowerCamelCase__ = {
"""in_channels""": 9,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCamelCase__ = UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def UpperCamelCase_ ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase_ ( self ):
torch.manual_seed(0 )
lowerCamelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.dummy_text_encoder
lowerCamelCase__ = self.dummy_tokenizer
lowerCamelCase__ = self.dummy_unet
lowerCamelCase__ = self.dummy_movq
lowerCamelCase__ = DDIMScheduler(
num_train_timesteps=10_00 ,beta_schedule="""linear""" ,beta_start=0.0_0085 ,beta_end=0.012 ,clip_sample=_lowerCAmelCase ,set_alpha_to_one=_lowerCAmelCase ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_lowerCAmelCase ,)
lowerCamelCase__ = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=0 ):
lowerCamelCase__ = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowerCamelCase__ = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(_lowerCAmelCase )
# create init_image
lowerCamelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowerCamelCase__ = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
lowerCamelCase__ = np.ones((64, 64) ,dtype=np.floataa )
lowerCamelCase__ = 0
if str(_lowerCAmelCase ).startswith("""mps""" ):
lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase )
else:
lowerCamelCase__ = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
lowerCamelCase__ = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """cpu"""
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**_lowerCAmelCase )
lowerCamelCase__ = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCamelCase__ = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
lowerCamelCase__ = output.images
lowerCamelCase__ = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) ,return_dict=_lowerCAmelCase ,)[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def UpperCamelCase_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowerCamelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowerCamelCase__ = np.ones((7_68, 7_68) ,dtype=np.floataa )
lowerCamelCase__ = 0
lowerCamelCase__ = """a hat"""
lowerCamelCase__ = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
lowerCamelCase__ = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" ,torch_dtype=torch.floataa )
lowerCamelCase__ = pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCamelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ = pipe_prior(
_lowerCAmelCase ,generator=_lowerCAmelCase ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
lowerCamelCase__ = pipeline(
_lowerCAmelCase ,image=_lowerCAmelCase ,mask_image=_lowerCAmelCase ,image_embeds=_lowerCAmelCase ,negative_image_embeds=_lowerCAmelCase ,generator=_lowerCAmelCase ,num_inference_steps=1_00 ,height=7_68 ,width=7_68 ,output_type="""np""" ,)
lowerCamelCase__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowerCAmelCase ,_lowerCAmelCase )
| 9 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = tempfile.mkdtemp()
lowerCamelCase__ = BlipImageProcessor()
lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor
def UpperCamelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self ):
lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 )
lowerCamelCase__ = BlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" )
lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = processor(text=_lowerCAmelCase )
lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase )
lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
| 9 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A__ ( __lowerCAmelCase : Optional[int] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def A__ ( __lowerCAmelCase : str ):
# word like '180' or '身高' or '神'
for char in word:
lowerCamelCase__ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def A__ ( __lowerCAmelCase : List[str] ):
lowerCamelCase__ = set()
for token in tokens:
lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
lowerCamelCase__ = list(__lowerCAmelCase )
return word_list
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set ):
if not chinese_word_set:
return bert_tokens
lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
lowerCamelCase__ = bert_tokens
lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase )
while start < end:
lowerCamelCase__ = True
if is_chinese(bert_word[start] ):
lowerCamelCase__ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
lowerCamelCase__ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCamelCase__ = """##""" + bert_word[j]
lowerCamelCase__ = start + i
lowerCamelCase__ = False
break
if single_word:
start += 1
return bert_word
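# Worked example of the greedy longest-match labeling above (illustrative; the
# upstream name of this function is assumed to be `add_sub_symbol`): given BERT
# tokens ["我", "喜", "欢", "北", "京"] and the LTP word set {"喜欢", "北京"},
# the expected output is ["我", "喜", "##欢", "北", "##京"]: every trailing piece
# of a matched whole word is prefixed with "##".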
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ):
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = []
for id in input_ids:
lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = []
# We only save the positions of Chinese subwords that start with ##, meaning they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowerCamelCase__ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def A__ ( __lowerCAmelCase : Dict ):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCamelCase__ = LTP(args.ltp ) # faster on a GPU device
lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert )
lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : Tuple = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
UpperCamelCase : List[str] = parser.parse_args()
main(args)
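# Example invocation (illustrative; the script filename and all paths are
# placeholders based on the argparse defaults above):
# python prepare_chinese_ref.py --file_name ./data/train.txt --ltp ./resources/ltp \
#     --bert bert-base-chinese --save_path ./data/ref.txt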
| 9 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def A__ ( __lowerCAmelCase : Union[str, Any] ):
if hor == 128:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase__ = (32, 64, 128, 256)
lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCamelCase__ = model
lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
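# Editorial note: both converters above rely on zip() over the two state dicts,
# which silently assumes the source and Diffusers models enumerate their
# parameters in exactly the same order; adding a per-pair shape check before
# the pop/assign loop would make the conversion safer.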
| 9 | 1 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def A__ ( __lowerCAmelCase : Any ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def A__ ( ):
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCamelCase__ = [1, 2, 3]
with pytest.raises(__lowerCAmelCase ):
with parallel_backend("""unsupported backend""" ):
map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=2 )
with pytest.raises(__lowerCAmelCase ):
with parallel_backend("""unsupported backend""" ):
map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = [1, 2]
lowerCamelCase__ = {"""a""": 1, """b""": 2}
lowerCamelCase__ = {"""a""": [1, 2], """b""": [3, 4]}
lowerCamelCase__ = {"""a""": {"""1""": 1}, """b""": 2}
lowerCamelCase__ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
lowerCamelCase__ = [2, 3]
lowerCamelCase__ = {"""a""": 2, """b""": 3}
lowerCamelCase__ = {"""a""": [2, 3], """b""": [4, 5]}
lowerCamelCase__ = {"""a""": {"""1""": 2}, """b""": 3}
lowerCamelCase__ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
assert map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) == expected_map_nested_sa
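# Worked illustration of the mapping asserted above (the picklable function is
# named `add_one` upstream): map_nested(add_one, {"a": [1, 2], "b": [3, 4]})
# recurses into the dict and its lists and returns {"a": [2, 3], "b": [4, 5]}.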
| 9 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,):
lowerCamelCase__ = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
lowerCamelCase__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowerCamelCase__ = token_dict["""token"""]
lowerCamelCase__ = Tokenizer(Unigram() )
lowerCamelCase__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ),
normalizers.Lowercase(),
] )
lowerCamelCase__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ),
pre_tokenizers.Punctuation(),
] )
lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )
lowerCamelCase__ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,)
lowerCamelCase__ = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,)
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [files]
self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase )
self.add_unk_id()
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,):
lowerCamelCase__ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,)
self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase )
self.add_unk_id()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = json.loads(self._tokenizer.to_str() )
lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""]
lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
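# Illustrative training sketch (the upstream class name is assumed to be
# SentencePieceUnigramTokenizer; the corpus and vocab size are toy values):
# tok = SentencePieceUnigramTokenizer()
# tok.train_from_iterator(["lower newer", "lower lower newer"], vocab_size=30)
# tok.encode("lower newer").tokens  # subword pieces starting with "▁"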
| 9 | 1 |
'''simple docstring'''
from collections import deque
def A__ ( __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = len(__lowerCAmelCase )
lowerCamelCase__ = deque()
lowerCamelCase__ = [False for _ in range(__lowerCAmelCase )]
lowerCamelCase__ = [-1 for _ in range(__lowerCAmelCase )]
lowerCamelCase__ = index_of[:]
def strong_connect(__lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ):
lowerCamelCase__ = index # the number when this node is seen
lowerCamelCase__ = index # lowest rank node reachable from here
index += 1
stack.append(__lowerCAmelCase )
lowerCamelCase__ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase__ = strong_connect(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase__ = []
lowerCamelCase__ = stack.pop()
lowerCamelCase__ = False
component.append(__lowerCAmelCase )
while w != v:
lowerCamelCase__ = stack.pop()
lowerCamelCase__ = False
component.append(__lowerCAmelCase )
components.append(__lowerCAmelCase )
return index
lowerCamelCase__ = []
for v in range(__lowerCAmelCase ):
if index_of[v] == -1:
strong_connect(__lowerCAmelCase , 0 , __lowerCAmelCase )
return components
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = [[] for _ in range(__lowerCAmelCase )]
for u, v in edges:
g[u].append(__lowerCAmelCase )
return g
if __name__ == "__main__":
# Test
UpperCamelCase : Dict = 7
UpperCamelCase : Dict = [0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCamelCase : str = [1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCamelCase : List[str] = [(u, v) for u, v in zip(source, target)]
UpperCamelCase : Optional[int] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
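# Editorial note: Tarjan's algorithm runs in O(V + E); for the 7-vertex,
# 9-edge graph above each vertex is pushed and popped exactly once, and the
# components come out in reverse topological order of the condensation:
# [5], [6], [4], [3, 2, 1, 0].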
| 9 |
'''simple docstring'''
from __future__ import annotations
import math
def A__ ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def A__ ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
lowerCamelCase__ = []
for num in range(len(__lowerCAmelCase ) ):
lowerCamelCase__ = 0
while 2 * i * i <= odd_composites[num]:
lowerCamelCase__ = odd_composites[num] - 2 * i * i
if is_prime(__lowerCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__lowerCAmelCase ) == n:
return list_nums
return []
def A__ ( ):
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
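# Worked check: 33 = 31 + 2 * 1**2 satisfies Goldbach's other conjecture, while
# 5777 is the smallest odd composite with no decomposition prime + 2*i*i, which
# is exactly the value compute_nums(1)[0] above returns.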
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,):
lowerCamelCase__ = parent
lowerCamelCase__ = 13
lowerCamelCase__ = 7
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 99
lowerCamelCase__ = 32
lowerCamelCase__ = 2
lowerCamelCase__ = 4
lowerCamelCase__ = 37
lowerCamelCase__ = """gelu"""
lowerCamelCase__ = 0.1
lowerCamelCase__ = 0.1
lowerCamelCase__ = 5_12
lowerCamelCase__ = 16
lowerCamelCase__ = 2
lowerCamelCase__ = 0.02
lowerCamelCase__ = 3
lowerCamelCase__ = 4
lowerCamelCase__ = None
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_choices )
lowerCamelCase__ = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ = True
lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFEsmModel(config=_lowerCAmelCase )
lowerCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCamelCase__ = model(_lowerCAmelCase )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
lowerCamelCase__ = True
lowerCamelCase__ = TFEsmModel(config=_lowerCAmelCase )
lowerCamelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
lowerCamelCase__ = model(_lowerCAmelCase )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase )
# Also check the case where encoder outputs are not passed
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = TFEsmForMaskedLM(config=_lowerCAmelCase )
lowerCamelCase__ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFEsmForTokenClassification(config=_lowerCAmelCase )
lowerCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCamelCase__ = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_UpperCamelCase = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFEsmModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFEsmModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase_ ( self ):
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_lowerCAmelCase )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase__ = model.get_bias()
assert isinstance(_lowerCAmelCase ,_lowerCAmelCase )
for k, v in name.items():
assert isinstance(_lowerCAmelCase ,tf.Variable )
else:
lowerCamelCase__ = model.get_output_embeddings()
assert x is None
lowerCamelCase__ = model.get_bias()
assert name is None
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ = model(_lowerCAmelCase )[0]
lowerCamelCase__ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,_lowerCAmelCase )
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-2 ) )
@slow
def UpperCamelCase_ ( self ):
lowerCamelCase__ = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowerCamelCase__ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase__ = model(_lowerCAmelCase )[0]
# compare the actual values for a slice.
lowerCamelCase__ = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
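# Editorial sketch of the integration target above (downloads the checkpoint;
# the output width of 320 is an assumption tied to facebook/esm2_t6_8M_UR50D):
# model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
# out = model(tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]))
# out.last_hidden_state.shape  # (1, 11, 320)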
| 9 |
'''simple docstring'''
def A__ ( ):
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solution() = }')
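# Editorial note: the unique triple is a=200, b=375, c=425
# (200 + 375 + 425 == 1000 and 200**2 + 375**2 == 425**2),
# so the product printed above is 31875000.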
| 9 | 1 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCamelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,**_lowerCAmelCase ):
super().__init__(**_lowerCAmelCase )
requires_backends(self ,"""vision""" )
requires_backends(self ,"""torch""" )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(_lowerCAmelCase )
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
lowerCamelCase__ = {}
lowerCamelCase__ = {}
lowerCamelCase__ = {}
# preprocess args
if "points_per_batch" in kwargs:
lowerCamelCase__ = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
lowerCamelCase__ = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
lowerCamelCase__ = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
lowerCamelCase__ = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
lowerCamelCase__ = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
lowerCamelCase__ = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
lowerCamelCase__ = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
lowerCamelCase__ = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
lowerCamelCase__ = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
lowerCamelCase__ = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
lowerCamelCase__ = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
lowerCamelCase__ = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self ,_lowerCAmelCase ,*_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
return super().__call__(_lowerCAmelCase ,*_lowerCAmelCase ,num_workers=_lowerCAmelCase ,batch_size=_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=64 ,_lowerCAmelCase = 0 ,_lowerCAmelCase = 5_12 / 15_00 ,_lowerCAmelCase = 32 ,_lowerCAmelCase = 1 ,):
lowerCamelCase__ = load_image(_lowerCAmelCase )
lowerCamelCase__ = self.image_processor.size["""longest_edge"""]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.image_processor.generate_crop_boxes(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
lowerCamelCase__ = self.get_inference_context()
with inference_context():
lowerCamelCase__ = self._ensure_tensor_on_device(_lowerCAmelCase ,device=self.device )
lowerCamelCase__ = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
lowerCamelCase__ = image_embeddings
lowerCamelCase__ = grid_points.shape[1]
lowerCamelCase__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = grid_points[:, i : i + points_per_batch, :, :]
lowerCamelCase__ = input_labels[:, i : i + points_per_batch]
lowerCamelCase__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=0.88 ,_lowerCAmelCase=0.95 ,_lowerCAmelCase=0 ,_lowerCAmelCase=1 ,):
lowerCamelCase__ = model_inputs.pop("""input_boxes""" )
lowerCamelCase__ = model_inputs.pop("""is_last""" )
lowerCamelCase__ = model_inputs.pop("""original_sizes""" ).tolist()
lowerCamelCase__ = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
lowerCamelCase__ = self.model(**_lowerCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowerCamelCase__ = model_outputs["""pred_masks"""]
lowerCamelCase__ = self.image_processor.post_process_masks(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,binarize=_lowerCAmelCase )
lowerCamelCase__ = model_outputs["""iou_scores"""]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.image_processor.filter_masks(
masks[0] ,iou_scores[0] ,original_sizes[0] ,input_boxes[0] ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,)
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=0.7 ,):
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
lowerCamelCase__ = torch.cat(_lowerCAmelCase )
lowerCamelCase__ = torch.cat(_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.image_processor.post_process_for_mask_generation(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = defaultdict(_lowerCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(_lowerCAmelCase )
lowerCamelCase__ = {}
if output_rle_mask:
lowerCamelCase__ = rle_mask
if output_bboxes_mask:
lowerCamelCase__ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 9 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCamelCase : Dict = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
UpperCamelCase : List[Any] = {
'camembert-base': 5_12,
}
UpperCamelCase : List[str] = '▁'
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
# Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,)
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
lowerCamelCase__ = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>)
lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
lowerCamelCase__ = len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = []
lowerCamelCase__ = """"""
lowerCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(_lowerCAmelCase )
lowerCamelCase__ = False
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def __getstate__( self ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self ,_lowerCAmelCase ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ = os.path.join(
_lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase ,"""wb""" ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
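# Illustrative round trip (sketch; the upstream class name is CamembertTokenizer,
# and ids 0..3 are reserved by the fairseq offset above):
# tok = CamembertTokenizer.from_pretrained("camembert-base")
# ids = tok("J'aime le camembert !")["input_ids"]
# tok.decode(ids, skip_special_tokens=True)  # approximately "J'aime le camembert !"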
| 9 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : List[Any] = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : int = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = R"""\w+[.]\d+"""
lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase )
for pat in pats:
lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) )
return key
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ):
lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
lowerCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ):
# Step 1: Convert pytorch tensor to numpy
lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) )
lowerCamelCase__ = flatten_dict(__lowerCAmelCase )
lowerCamelCase__ = {}
# Need to change some parameter names to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__ = rename_key(__lowerCAmelCase )
lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
lowerCamelCase__ = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
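# Worked example of the renaming rules above (derived from the code itself):
# a PyTorch linear weight ("encoder", "dense", "weight") with shape (out, in)
# becomes the Flax kernel ("encoder", "dense", "kernel") holding the transposed
# (in, out) array, and a 4D conv weight (O, I, kH, kW) is permuted via
# transpose(2, 3, 1, 0) to Flax's (kH, kW, I, O) layout.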
| 9 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase : Any = logging.get_logger(__name__)
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = ['pixel_values']
def __init__( self ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = PILImageResampling.BICUBIC ,_lowerCAmelCase = True ,_lowerCAmelCase = 1 / 2_55 ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = size if size is not None else {"""height""": 3_84, """width""": 3_84}
lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = resample
lowerCamelCase__ = do_rescale
lowerCamelCase__ = rescale_factor
lowerCamelCase__ = do_normalize
lowerCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase__ = do_convert_rgb
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = PILImageResampling.BICUBIC ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
lowerCamelCase__ = (size["""height"""], size["""width"""])
return resize(_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
return rescale(_lowerCAmelCase ,scale=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
return normalize(_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = ChannelDimension.FIRST ,**_lowerCAmelCase ,):
lowerCamelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ = resample if resample is not None else self.resample
lowerCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ = image_std if image_std is not None else self.image_std
lowerCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase__ = size if size is not None else self.size
lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
lowerCamelCase__ = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase__ = [convert_to_rgb(_lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase__ = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase__ = [self.resize(image=_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase__ = [self.rescale(image=_lowerCAmelCase ,scale=_lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase__ = [self.normalize(image=_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase ) for image in images]
lowerCamelCase__ = [to_channel_dimension_format(_lowerCAmelCase ,_lowerCAmelCase ) for image in images]
lowerCamelCase__ = BatchFeature(data={"""pixel_values""": images} ,tensor_type=_lowerCAmelCase )
return encoded_outputs
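# Editorial sketch of a preprocessing call with the defaults above (the class
# name BlipImageProcessor is the assumed upstream name):
# import numpy as np
# proc = BlipImageProcessor()
# batch = proc(images=np.zeros((512, 512, 3), dtype=np.uint8), return_tensors="np")
# batch["pixel_values"].shape  # (1, 3, 384, 384)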
| 9 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ):
lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sgugger/tiny-distilbert-classification"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase )
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_lowerCAmelCase ):
self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) )
self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase )
lowerCamelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
| 9 | 1 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : list ):
lowerCamelCase__ = 0
while len(__lowerCAmelCase ) > 1:
lowerCamelCase__ = 0
# Greedy choice: merge the two smallest files
for _ in range(2 ):
lowerCamelCase__ = files.index(min(__lowerCAmelCase ) )
temp += files[min_index]
files.pop(__lowerCAmelCase )
files.append(__lowerCAmelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
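# Worked example: files = [2, 3, 4] merges 2 + 3 = 5 (cost 5), then 5 + 4 = 9
# (cost 9), so the optimal merge cost returned is 5 + 9 = 14.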
| 9 |
'''simple docstring'''
from math import factorial
UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def A__ ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Parameter number must be int""" )
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""" )
# Convert the number to a string to iterate over its digits and sum their factorials.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) )
def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Parameters chain_length and number_limit must be int""" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""" )
# the counter for the chains with the exact desired length
lowerCamelCase__ = 0
# the cached sizes of the previous chains
lowerCamelCase__ = {}
for start_chain_element in range(1 , __lowerCAmelCase ):
# The temporary set will contain the elements of the chain
lowerCamelCase__ = set()
lowerCamelCase__ = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater than the desired one.
lowerCamelCase__ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__lowerCAmelCase )
chain_set_length += 1
lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCamelCase__ = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
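    # Sanity check (illustrative): 145 is a factorion, i.e. 1! + 4! + 5! = 1 + 24 + 120 = 145,
    # so the digit-factorial sum maps it back onto itself.
    assert digit_factorial_sum(145) == 145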
print(F'{solution()}')
| 9 | 1 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        'Margot Frank, died in 1945, a month earlier than previously thought.',
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        ' the final seconds on board Flight 9525.',
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 9 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
UpperCamelCase = None
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
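# Illustrative example (object name chosen for illustration only):
# create_dummy_object("UNet2DModel", '["torch"]') renders DUMMY_CLASS into a
# placeholder class whose constructor calls requires_backends(self, ["torch"]).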
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 9 | 1 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class AccelerateTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 9 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on
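        # Note: "<0x39>" is the raw byte for "9", and "<0xC3>", "<0xA9>" are the two
        # UTF-8 bytes of "é"; the tokenizer falls back to bytes for pieces that are
        # missing from the SentencePiece vocabulary.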
    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 9 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
            num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
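# The four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98;
# their product reduces to 1/100, so the denominator printed above is 100.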
| 9 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu, run_time=1),
            Create(gpu, run_time=1),
            Create(model, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=RIGHT, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 9 | 1 |
'''simple docstring'''
def factorial(num: int) -> int:
    """Find the factorial of the given number num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the factorial of num."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 9 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int) -> int:
    """Returns the next number of the chain of squared digit sums."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
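# For example, next_number(44) == 4**2 + 4**2 == 32 and next_number(85) == 8**2 + 5**2 == 89.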
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Returns the count of starting numbers below number whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 9 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 9 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = 'donut-swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
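# Minimal usage sketch: with the defaults above, the derived channel dimension is
#   DonutSwinConfig().hidden_size == 96 * 2 ** 3 == 768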
| 9 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 9 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
        pytorch_dump_folder_path = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 9 | 1 |
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 9 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
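        # Example with the defaults above: (30 // 2) ** 2 = 225 patches, so
        # seq_length = ceil((1 - 0.6) * 226) = ceil(90.4) = 91.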
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np_dict = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np_dict, noise=noise)
            output_for_kw_input = model(**inputs_np_dict, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 9 | 1 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 9 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 9 | 1 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ):
# Step 1: Convert pytorch tensor to numpy
lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) )
lowerCamelCase__ = flatten_dict(__lowerCAmelCase )
lowerCamelCase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__ = rename_key(__lowerCAmelCase )
lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
lowerCamelCase__ = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
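# Aside: a minimal, self-contained sketch of the two key transformations
# above (the key string below is a made-up example, not from a real model):
import re

import numpy as np

# PyTorch conv weights are (out_ch, in_ch, kh, kw); Flax expects (kh, kw, in_ch, out_ch).
pt_conv = np.zeros((8, 3, 5, 5))
print(pt_conv.transpose(2, 3, 1, 0).shape)  # (5, 5, 3, 8)

# rename_key folds module-list indices into the name, matching Flax layouts.
key = "down_blocks.0.resnets.1.conv1.weight"
for pat in re.findall(r"\w+[.]\d+", key):
    key = key.replace(pat, "_".join(pat.split(".")))
print(key)  # down_blocks_0.resnets_1.conv1.weight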
| 9 |
"""Batch gradient descent for a linear hypothesis function on a toy data set."""

import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the predicted and actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis; parameter_vector[0] is the bias term."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of error (times the indexed feature) over the data; index == -1 is the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
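# Aside: the same batch-gradient update, as a vectorized numpy sketch on the
# identical toy data (the iteration cap is an added safeguard, not in the
# original; the update and stopping tolerance match the code above):
import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
y = np.array([15, 25, 41, 8, 41], dtype=float)
X = np.hstack([np.ones((len(X), 1)), X])  # bias column, so theta[0] is the intercept
theta = np.array([2.0, 4.0, 1.0, 5.0])

for iteration in range(1, 1_000_000):
    grad = (X @ theta - y) @ X / len(y)   # d/dtheta of the mean-squared-error cost
    new_theta = theta - 0.009 * grad
    if np.allclose(theta, new_theta, atol=2e-6, rtol=0):
        break
    theta = new_theta
print("Converged after", iteration, "iterations:", theta)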
| 9 | 1 |
"""Public API of the diffusers package."""
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
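# Aside: every try/except block above follows one optional-dependency guard
# pattern. A minimal self-contained sketch of it (the scheduler name is
# reused purely as an illustration; the dummy class is an assumption, not
# the library's actual fallback machinery):
import importlib.util


class OptionalDependencyNotAvailable(ImportError):
    """Raised when an optional backend is missing."""


def is_scipy_available():
    return importlib.util.find_spec("scipy") is not None


try:
    if not is_scipy_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    class LMSDiscreteScheduler:  # placeholder: fails loudly only when used
        def __init__(self, *args, **kwargs):
            raise ImportError("LMSDiscreteScheduler requires `scipy`.")
else:
    # here the real package would do: from .schedulers import LMSDiscreteScheduler
    import scipy  # noqa: F401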
| 9 |
"""Convert an original (CompVis-style) latent-diffusion checkpoint to a diffusers LDMPipeline."""

import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
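# Aside: the core move above is splitting one flat state dict into per-module
# dicts by key prefix, with the prefix stripped so load_state_dict accepts
# the keys. A self-contained sketch of that idiom (toy keys and values):
state_dict = {
    "first_stage_model.encoder.w": 1,
    "model.diffusion_model.block.0.w": 2,
    "cond_stage_model.w": 3,
}
prefix = "first_stage_model."
first_stage_dict = {
    key[len(prefix):]: value for key, value in state_dict.items() if key.startswith(prefix)
}
print(first_stage_dict)  # {'encoder.w': 1}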
| 9 | 1 |
"""Pure-Python implementation of the SHA-256 hash, verified against hashlib."""

import argparse
import struct
import unittest


class SHA256:
    """Computes the SHA-256 digest of `data`; the hex result ends up in `self.hash`."""

    def __init__(self, data):
        self.data = data
        # Initialize hash values
        self.hashes = [
            0x6a_09_e6_67,
            0xbb_67_ae_85,
            0x3c_6e_f3_72,
            0xa5_4f_f5_3a,
            0x51_0e_52_7f,
            0x9b_05_68_8c,
            0x1f_83_d9_ab,
            0x5b_e0_cd_19,
        ]
        # Initialize round constants
        self.round_constants = [
            0x42_8a_2f_98,
            0x71_37_44_91,
            0xb5_c0_fb_cf,
            0xe9_b5_db_a5,
            0x39_56_c2_5b,
            0x59_f1_11_f1,
            0x92_3f_82_a4,
            0xab_1c_5e_d5,
            0xd8_07_aa_98,
            0x12_83_5b_01,
            0x24_31_85_be,
            0x55_0c_7d_c3,
            0x72_be_5d_74,
            0x80_de_b1_fe,
            0x9b_dc_06_a7,
            0xc1_9b_f1_74,
            0xe4_9b_69_c1,
            0xef_be_47_86,
            0x0f_c1_9d_c6,
            0x24_0c_a1_cc,
            0x2d_e9_2c_6f,
            0x4a_74_84_aa,
            0x5c_b0_a9_dc,
            0x76_f9_88_da,
            0x98_3e_51_52,
            0xa8_31_c6_6d,
            0xb0_03_27_c8,
            0xbf_59_7f_c7,
            0xc6_e0_0b_f3,
            0xd5_a7_91_47,
            0x06_ca_63_51,
            0x14_29_29_67,
            0x27_b7_0a_85,
            0x2e_1b_21_38,
            0x4d_2c_6d_fc,
            0x53_38_0d_13,
            0x65_0a_73_54,
            0x76_6a_0a_bb,
            0x81_c2_c9_2e,
            0x92_72_2c_85,
            0xa2_bf_e8_a1,
            0xa8_1a_66_4b,
            0xc2_4b_8b_70,
            0xc7_6c_51_a3,
            0xd1_92_e8_19,
            0xd6_99_06_24,
            0xf4_0e_35_85,
            0x10_6a_a0_70,
            0x19_a4_c1_16,
            0x1e_37_6c_08,
            0x27_48_77_4c,
            0x34_b0_bc_b5,
            0x39_1c_0c_b3,
            0x4e_d8_aa_4a,
            0x5b_9c_ca_4f,
            0x68_2e_6f_f3,
            0x74_8f_82_ee,
            0x78_a5_63_6f,
            0x84_c8_78_14,
            0x8c_c7_02_08,
            0x90_be_ff_fa,
            0xa4_50_6c_eb,
            0xbe_f9_a3_f7,
            0xc6_71_78_f2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data):
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Checks the SHA256 class against hashlib."""

    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
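# Aside: a quick sanity check of the pure-Python implementation against
# hashlib, using the classic "abc" test vector:
import hashlib

msg = b"abc"
assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
print(SHA256(msg).hash)  # ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad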
| 9 |
"""Project Euler 59: break a three-letter XOR key and sum the decrypted text."""

from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}

COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """Decode the ciphertext with `key`; return None if any character is invalid."""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-lowercase-letter key and keep decodings that stay printable."""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
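# Aside: XOR with a repeating key is self-inverse, which is what try_key
# exploits; a tiny round-trip demo (the plaintext and key are made up):
from itertools import cycle

plaintext = "an example message"
key = (ord("a"), ord("b"), ord("c"))
cipher = [ord(c) ^ k for c, k in zip(plaintext, cycle(key))]
assert "".join(chr(c ^ k) for c, k in zip(cipher, cycle(key))) == plaintext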
| 9 | 1 |
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
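# Aside: the <0x39> / <0xC3><0xA9> pieces asserted above come from
# SentencePiece byte fallback -- characters missing from the vocab are
# emitted as UTF-8 byte tokens. A pure-Python view of the same encoding:
for ch in ("9", "é"):
    print(ch, [f"<0x{b:02X}>" for b in ch.encode("utf-8")])
# 9 ['<0x39>']
# é ['<0xC3>', '<0xA9>']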
| 9 |
| 9 | 1 |