| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 54.1k chars) | int64 (0 – 699) | string (111 – 35.6k chars) | int64 (0 – 699) | int64 (0 – 1) |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='Bit does not output attentions')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='Bit does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Bit does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict['output_hidden_states']
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason='Bit does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the integration test."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
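The tester above drives the whole Bit test suite with a deliberately tiny configuration. As a minimal sketch of the same forward pass outside the test harness (assuming a `transformers` build with Bit support; the sizes mirror the tester defaults):

```python
import torch
from transformers import BitConfig, BitModel

# Tiny config mirroring the tester defaults above.
config = BitConfig(
    num_channels=3,
    embeddings_size=10,
    hidden_sizes=[8, 16, 32, 64],
    depths=[1, 1, 2, 1],
    hidden_act="relu",
)
model = BitModel(config)
model.eval()

pixel_values = torch.rand(1, 3, 32, 32)  # (batch, channels, height, width)
with torch.no_grad():
    outputs = model(pixel_values)

# The backbone downsamples by a factor of 32, as the shape check above asserts.
print(outputs.last_hidden_state.shape)  # torch.Size([1, 64, 1, 1])
```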
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity coefficient of two collections."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        # Build the union as a list so it also works when set_a is a tuple.
        union = list(set_a) + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)
    return None
if __name__ == "__main__":
__lowerCAmelCase ={"a", "b", "c", "d", "e"}
__lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
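Worked by hand from the definitions above, the two union conventions give different scores for the demo sets:

```python
# |A ∩ B| = |{c, d, e}| = 3, |A ∪ B| = 8  ->  0.375
print(jaccard_similarity(set_a, set_b))

# The alternative union counts both collections in full: 3 / (5 + 6) ≈ 0.273
print(jaccard_similarity(set_a, set_b, alternative_union=True))
```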
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = '''▁'''
__snake_case = {'''vocab_file''': '''prophetnet.tokenizer'''}
__snake_case = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
            '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
__snake_case = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
__snake_case = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_1_2,
}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = collections.OrderedDict()
with open(_lowercase , 'r' , encoding='utf-8' ) as reader:
__UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
__UpperCamelCase = token.rstrip('\n' )
__UpperCamelCase = index
return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-ProphetNet."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self: str,A_: int,A_: str="[SEP]",A_: List[Any]="[SEP]",A_: str="[SEP]",A_: Any="[UNK]",A_: Optional[int]="[PAD]",A_: List[str]="[CLS]",A_: Dict="[MASK]",A_: Optional[Dict[str, Any]] = None,**A_: str,):
'''simple docstring'''
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_,eos_token=A_,sep_token=A_,unk_token=A_,pad_token=A_,cls_token=A_,mask_token=A_,sp_model_kwargs=self.sp_model_kwargs,**A_,)
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
__UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__UpperCamelCase = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10 ):
__UpperCamelCase = F'''[unused{i}]'''
__UpperCamelCase = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__UpperCamelCase = 12
__UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(A_ )
def __getstate__( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self: List[Any],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self,'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self: Any,A_: List[int],A_: Optional[List[int]] = None,A_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_,token_ids_a=A_,already_has_special_tokens=A_ )
if token_ids_a is None:
return ([0] * len(A_ )) + [1]
return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def snake_case_ ( self: Optional[int],A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self: List[Any],A_: str ):
'''simple docstring'''
return self.sp_model.encode(A_,out_type=A_ )
def snake_case_ ( self: Any,A_: Optional[int] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCamelCase = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self: str,A_: int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self: Tuple,A_: int ):
'''simple docstring'''
__UpperCamelCase = ''.join(A_ ).replace(A_,' ' ).strip()
return out_string
def snake_case_ ( self: Optional[int],A_: str,A_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCamelCase = os.path.join(
A_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_,'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def snake_case_ ( self: Tuple,A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__UpperCamelCase = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
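The token-to-id path above layers a fixed fairseq-style table over the SentencePiece vocabulary: special tokens take the low ids, and every SentencePiece id is shifted by the fairseq offset (12 here). A self-contained sketch of that two-level lookup, with the SentencePiece model stubbed out by a plain dict for illustration:

```python
# Hypothetical stand-in for the SentencePiece model, for illustration only.
SPM_PIECE_TO_ID = {"▁de": 8, "s": 7, ",": 4}

FAIRSEQ_TOKENS_TO_IDS = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
FAIRSEQ_OFFSET = 12
UNK_TOKEN_ID = 3


def convert_token_to_id(token: str) -> int:
    """Mirror of the conversion logic above: specials first, then offset pieces."""
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = SPM_PIECE_TO_ID.get(token, 0)
    # A SentencePiece id of 0 means "unknown", so fall back to the unk token.
    return spm_id + FAIRSEQ_OFFSET if spm_id else UNK_TOKEN_ID


print(convert_token_to_id("[SEP]"))  # 2
print(convert_token_to_id("▁de"))    # 20 (8 + offset 12)
print(convert_token_to_id("zzz"))    # 3 (unknown)
```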
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``."""
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += ' '
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
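For example, the function above plays the classic game over 1..15:

```python
print(fizz_buzz(1, 15))
# 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz
```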
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build a QFT circuit on ``number_of_qubits`` qubits and simulate it."""
    if not isinstance(number_of_qubits, int):
        raise TypeError('number of qubits must be an integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')

    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        'Total count for quantum fourier transform state is: '
        f'{quantum_fourier_transform(3)}'
    )
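Applied to the all-zeros initial state, the QFT produces a uniform superposition, so the simulated counts should split roughly evenly across the 2^n basis states (the exact numbers vary run to run; the figures below are illustrative):

```python
counts = quantum_fourier_transform(2)
# Roughly 2500 shots per basis state out of 10_000, e.g.:
# {'00': 2521, '01': 2470, '10': 2505, '11': 2504}
print(counts)
```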
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """Dataset input stream backed by a Python generator function."""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> str:
super().__init__(
features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = Generator(
cache_dir=UpperCAmelCase__ , features=UpperCAmelCase__ , generator=UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# Build iterable dataset
if self.streaming:
a_ = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
a_ = None
a_ = None
a_ = None
a_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase__ , download_mode=UpperCAmelCase__ , verification_mode=UpperCAmelCase__ , base_path=UpperCAmelCase__ , num_proc=self.num_proc , )
a_ = self.builder.as_dataset(
split='train' , verification_mode=UpperCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
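`GeneratorDatasetInputStream` is the plumbing behind `Dataset.from_generator`; the user-facing call looks like this (a sketch assuming a recent `datasets` release):

```python
from datasets import Dataset


def gen():
    # Yield one example dict per row.
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}


ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}
```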
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Any = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
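This follows the standard `transformers` lazy-import layout: `_import_structure` maps submodules to exported names, and `_LazyModule` defers the real imports until an attribute is first touched, keeping `import transformers` cheap. A stripped-down sketch of the mechanism (a simplified re-implementation for illustration, not the library's actual class):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access (simplified sketch)."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# Usage sketch with a hypothetical package:
# lazy = LazyModule("mypackage", {"modeling": ["MyModel"], "config": ["MyConfig"]})
# model_cls = lazy.MyModel  # imports mypackage.modeling only on first touch
```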
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch snowflake iteration rule ``steps`` times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment with the four segments of the Koch motif."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake outline with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
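Each iteration maps n points to 4·(n − 1) + 1 points, so five iterations of the starting triangle (4 points, counting the repeated start vertex) yield 3073 points:

```python
vectors = iterate(INITIAL_VECTORS, 5)
print(len(vectors))  # 3073
plot(vectors)
```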
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__UpperCamelCase : int = TypeVar('''T''')
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = {}
lowerCAmelCase = 0
def __len__( self ):
"""simple docstring"""
return self.elements
def __repr__( self ):
"""simple docstring"""
return str(self.heap )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.elements == 0
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
self.heap.append((elem, weight) )
lowerCAmelCase = self.elements
self.elements += 1
self._bubble_up(_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
lowerCAmelCase ,lowerCAmelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowerCAmelCase ,lowerCAmelCase = self.heap[0]
self._bubble_down(_snake_case )
return elem
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.position_map[elem]
lowerCAmelCase = (elem, weight)
if position > 0:
lowerCAmelCase = get_parent_position(_snake_case )
lowerCAmelCase ,lowerCAmelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_snake_case )
else:
self._bubble_down(_snake_case )
else:
self._bubble_down(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.position_map[elem]
if curr_pos == 0:
return None
lowerCAmelCase = get_parent_position(_snake_case )
lowerCAmelCase ,lowerCAmelCase = self.heap[curr_pos]
lowerCAmelCase ,lowerCAmelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_snake_case , _snake_case )
return self._bubble_up(_snake_case )
return None
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.position_map[elem]
lowerCAmelCase ,lowerCAmelCase = self.heap[curr_pos]
lowerCAmelCase = get_child_left_position(_snake_case )
lowerCAmelCase = get_child_right_position(_snake_case )
if child_left_position < self.elements and child_right_position < self.elements:
lowerCAmelCase ,lowerCAmelCase = self.heap[child_left_position]
lowerCAmelCase ,lowerCAmelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_snake_case , _snake_case )
return self._bubble_down(_snake_case )
if child_left_position < self.elements:
lowerCAmelCase ,lowerCAmelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_snake_case , _snake_case )
return self._bubble_down(_snake_case )
else:
return None
if child_right_position < self.elements:
lowerCAmelCase ,lowerCAmelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_snake_case , _snake_case )
return self._bubble_down(_snake_case )
return None
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.heap[nodea_pos][0]
lowerCAmelCase = self.heap[nodea_pos][0]
lowerCAmelCase ,lowerCAmelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
lowerCAmelCase = nodea_pos
lowerCAmelCase = nodea_pos
class GraphUndirectedWeighted(Generic[T]):
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = {}
lowerCAmelCase = 0
def __repr__( self ):
"""simple docstring"""
return str(self.connections )
def __len__( self ):
"""simple docstring"""
return self.nodes
    def add_node(self, node):
        """Add a node to the graph if it is not in it already."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge(self, node1, node2, weight):
        """Add an undirected edge with the given weight between node1 and node2."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T]):
    """Run Prim's algorithm; return (dist, parent) describing the spanning tree."""
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}

    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
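With the names restored above (note that the heap internals still carry obfuscated assignments, so treat this as a sketch of intended usage rather than a guaranteed run), building a small weighted graph and running Prim's algorithm would look like this; the distances and parents below are worked out by hand for this triangle:

```python
graph = GraphUndirectedWeighted()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("c", "a", 5)

dist, parent = prims_algo(graph)
# Spanning tree keeps a-b (3) and a-c (5); b-c (10) is dropped.
print(dist)    # {'a': 0, 'b': 3, 'c': 5}
print(parent)  # {'a': None, 'b': 'a', 'c': 'a'}
```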
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def a ( _UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
for char in word:
a_ = ord(_UpperCAmelCase )
if not _is_chinese_char(_UpperCAmelCase ):
return 0
return 1
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
a_ = set()
for token in tokens:
a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
if chinese_word:
word_set.add(_UpperCAmelCase )
a_ = list(_UpperCAmelCase )
return word_list
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] )
a_ = bert_tokens
a_ , a_ = 0, len(_UpperCAmelCase )
while start < end:
a_ = True
if is_chinese(bert_word[start] ):
a_ = min(end - start , _UpperCAmelCase )
for i in range(_UpperCAmelCase , 1 , -1 ):
a_ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a_ = '##' + bert_word[j]
a_ = start + i
a_ = False
break
if single_word:
start += 1
return bert_word
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
"""simple docstring"""
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
a_ = [get_chinese_word(_UpperCAmelCase ) for r in res]
ltp_res.extend(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
a_ = []
for id in input_ids:
a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
input_tokens.append(_UpperCAmelCase )
a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
a_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_UpperCAmelCase ):
if token[:2] == "##":
a_ = token[2:]
# save chinese tokens' pos
if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
ref_id.append(_UpperCAmelCase )
ref_ids.append(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
return ref_ids
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
a_ = f.readlines()
a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
a_ = LTP(args.ltp ) # faster in GPU device
a_ = BertTokenizer.from_pretrained(args.bert )
a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(_UpperCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__lowerCAmelCase =parser.parse_args()
main(args)
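The CJK test at the top of this script matches the Unicode ideograph blocks listed in its inline hex ranges; a self-contained restatement that can be sanity-checked directly:

```python
def is_chinese_char(cp: int) -> bool:
    """Return True if the code point falls in a CJK ideograph block."""
    return (
        0x4E00 <= cp <= 0x9FFF
        or 0x3400 <= cp <= 0x4DBF
        or 0x20000 <= cp <= 0x2A6DF
        or 0x2A700 <= cp <= 0x2B73F
        or 0x2B740 <= cp <= 0x2B81F
        or 0x2B820 <= cp <= 0x2CEAF
        or 0xF900 <= cp <= 0xFAFF
        or 0x2F800 <= cp <= 0x2FA1F
    )


print(is_chinese_char(ord("语")))  # True
print(is_chinese_char(ord("a")))   # False
```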
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    """SHA-256 hash implementation, following FIPS 180-4."""

    def __init__(self, data):
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
        self.round_constants = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data):
        """Pad the message to a multiple of 64 bytes, appending its bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self):
        """Run the compression function over every 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join(hex(value)[2:].zfill(8) for value in self.hashes)

    def ror(self, value, rotations):
        """Right-rotate a 32-bit integer by ``rotations`` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Compare the from-scratch implementation against hashlib."""

    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
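With the names cleaned up above, the implementation can be checked against the standard SHA-256 test vector for "abc":

```python
import hashlib

msg = b"abc"
assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
print(SHA256(msg).hash)
# ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad
```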
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase ={
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": 512,
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RetriBert tokenizer, backed by HuggingFace tokenizers (BERT-style)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__="[UNK]" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="[PAD]" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[MASK]" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> int:
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
):
a_ = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
a_ = do_lower_case
a_ = strip_accents
a_ = tokenize_chinese_chars
a_ = normalizer_class(**UpperCAmelCase__ )
a_ = do_lower_case
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> str:
a_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
a_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
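In use, the fast tokenizer behaves like any other BERT-style tokenizer (a sketch assuming a `transformers` version that still ships RetriBert; it was removed from newer releases):

```python
from transformers import RetriBertTokenizerFast

tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
enc = tok("hello world")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# ['[CLS]', 'hello', 'world', '[SEP]']
```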
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
lowerCamelCase_ = UnCLIPImageVariationPipeline
lowerCamelCase_ = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
lowerCamelCase_ = IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase_ = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
lowerCamelCase_ = False
@property
def _snake_case ( self :List[str] ) -> Tuple:
"""simple docstring"""
return 32
@property
def _snake_case ( self :List[Any] ) -> Tuple:
"""simple docstring"""
return 32
@property
def _snake_case ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self.time_input_dim
@property
def _snake_case ( self :List[str] ) -> Dict:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def _snake_case ( self :int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _snake_case ( self :int ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__A )
@property
def _snake_case ( self :Union[str, Any] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__A )
@property
def _snake_case ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
SCREAMING_SNAKE_CASE__ = UnCLIPTextProjModel(**__A )
return model
@property
def _snake_case ( self :Union[str, Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**__A )
return model
@property
def _snake_case ( self :List[str] ) -> Dict:
"""simple docstring"""
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _snake_case ( self :Tuple ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _snake_case ( self :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(1 )
SCREAMING_SNAKE_CASE__ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _snake_case ( self :str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dummy_decoder
SCREAMING_SNAKE_CASE__ = self.dummy_text_proj
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_super_res_first
SCREAMING_SNAKE_CASE__ = self.dummy_super_res_last
SCREAMING_SNAKE_CASE__ = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
SCREAMING_SNAKE_CASE__ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
SCREAMING_SNAKE_CASE__ = CLIPImageProcessor(crop_size=32 , size=32 )
SCREAMING_SNAKE_CASE__ = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _snake_case ( self :Union[str, Any] , __A :Optional[int] , __A :List[str]=0 , __A :str=True ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__A )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(__A )
if pil_image:
SCREAMING_SNAKE_CASE__ = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE__ = DiffusionPipeline.numpy_to_pil(__A )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _snake_case ( self :str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE__ = pipe(**__A )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE__ = pipe(
**__A , return_dict=__A , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE__ = pipe(**__A )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE__ = pipe(
**__A , return_dict=__A , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE__ = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
SCREAMING_SNAKE_CASE__ = pipe(**__A )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE__ = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
SCREAMING_SNAKE_CASE__ = pipe(
**__A , return_dict=__A , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch.device("""cpu""" )
        class DummyScheduler:
            init_noise_sigma = 1
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe.decoder.dtype
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
SCREAMING_SNAKE_CASE__ = pipe.prepare_latents(
__A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE__ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
SCREAMING_SNAKE_CASE__ = pipe.prepare_latents(
__A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A , pil_image=__A )
SCREAMING_SNAKE_CASE__ = pipe(
**__A , decoder_latents=__A , super_res_latents=__A ).images
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A , pil_image=__A )
# Don't pass image, instead pass embedding
SCREAMING_SNAKE_CASE__ = pipeline_inputs.pop("""image""" )
SCREAMING_SNAKE_CASE__ = pipe.image_encoder(__A ).image_embeds
SCREAMING_SNAKE_CASE__ = pipe(
**__A , decoder_latents=__A , super_res_latents=__A , image_embeddings=__A , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _snake_case ( self :Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
SCREAMING_SNAKE_CASE__ = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__A , expected_max_diff=__A )
@skip_mps
def _snake_case ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch_device == """cpu"""
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=__A , relax_max_difference=__A , additional_params_copy_to_batched_inputs=__A , )
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
additional_params_copy_to_batched_inputs = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
batch_sizes = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=batch_sizes , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs )
@skip_mps
def _snake_case ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _snake_case ( self :str ) -> int:
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :List[str] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :int ) -> Optional[Any]:
"""simple docstring"""
input_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
pipeline = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.float16 )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
output = pipeline(
input_image , generator=generator , output_type="""np""" , )
image = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(image , expected_image , 15 ) | 6 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "lilt"
def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1024 , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.classifier_dropout = classifier_dropout
self.channel_shrink_ratio = channel_shrink_ratio
self.max_2d_position_embeddings = max_2d_position_embeddings
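# Illustrative usage only (assumes the public transformers API; not part of
# the original file):
#
#   from transformers import LiltConfig, LiltModel
#   config = LiltConfig()  # the defaults shown in __init__ above
#   config = LiltConfig.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#   model = LiltModel(config)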
| 697 | 0 |
"""simple docstring"""
def reverse_long_words( sentence : str ) -> str:
'''Reverse each word longer than four characters, leaving the rest unchanged.'''
return " ".join(
''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
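# A couple of sanity checks (sketch, not part of the original file): only
# words longer than four characters are reversed, so the garbled sentence
# above round-trips to readable English.
assert reverse_long_words('Hey wollef sroirraw') == 'Hey fellow warriors'
assert reverse_long_words('one two three') == 'one two eerht'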
| 7 |
'''simple docstring'''
from __future__ import annotations
def solve_maze( maze ) -> bool:
"""Print and return whether a path exists from (0, 0) to (size - 1, size - 1)."""
size = len(maze )
# We need to create a solutions matrix to record the path.
solutions = [[0 for _ in range(size )] for _ in range(size )]
solved = run_maze(maze , 0 , 0 , solutions )
if solved:
print('\n'.join(str(row ) for row in solutions ) )
else:
print('No solution exists!' )
return solved
def run_maze( maze , i , j , solutions ) -> bool:
"""Depth-first search with backtracking; marks the path in `solutions`."""
size = len(maze )
# Final check point: reached the bottom-right cell.
if i == j == (size - 1):
solutions[i][j] = 1
return True
lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
upper_flag = (i < size) and (j < size)  # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and blocked points.
block_flag = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# mark as visited
solutions[i][j] = 1
# try all four directions
if (
run_maze(maze , i + 1 , j , solutions )
or run_maze(maze , i , j + 1 , solutions )
or run_maze(maze , i - 1 , j , solutions )
or run_maze(maze , i , j - 1 , solutions )
):
return True
solutions[i][j] = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
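# A minimal usage sketch (assumed input format, not part of the original
# file): 0 marks an open cell, 1 marks a wall. The solver prints the visited
# path as a matrix of 1s and returns True when a route from the top-left to
# the bottom-right corner exists.
example_maze = [
[0, 1, 0],
[0, 0, 0],
[1, 1, 0],
]
assert solve_maze(example_maze)  # prints the solution matrix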
| 697 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : List[str] = {'''vocab_file''': '''vocab.json'''}
lowercase__ : Union[str, Any] = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
lowercase__ : Optional[int] = {'''mgp-str''': 27}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs):
'''simple docstring'''
super().__init__(
unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
with open(vocab_file , encoding='utf-8') as vocab_handle:
self.vocab = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.vocab.items()}
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return len(self.vocab)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE ( self , text):
'''simple docstring'''
char_tokens = []
for s in text:
char_tokens.extend(s)
return char_tokens
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.vocab.get(_UpperCAmelCase , self.vocab.get(self.unk_token))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , save_directory , filename_prefix = None):
'''simple docstring'''
if not os.path.isdir(save_directory):
logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
return
vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
with open(vocab_file , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False) + '\n')
return (vocab_file,) | 8 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of each unit's conversion factor relative to the meter
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion( value , from_type , to_type ) -> float:
"""Convert a length between metric units by shifting the decimal exponent."""
from_sanitized = from_type.lower().strip('s' )
to_sanitized = to_type.lower().strip('s' )
from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
if from_sanitized not in METRIC_CONVERSION:
msg = (
F'''Invalid \'from_type\' value: {from_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}'''
)
raise ValueError(msg )
if to_sanitized not in METRIC_CONVERSION:
msg = (
F'''Invalid \'to_type\' value: {to_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}'''
)
raise ValueError(msg )
from_exponent = METRIC_CONVERSION[from_sanitized]
to_exponent = METRIC_CONVERSION[to_sanitized]
exponent = 1
if from_exponent > to_exponent:
exponent = from_exponent - to_exponent
else:
exponent = -(to_exponent - from_exponent)
return value * pow(1_0 , exponent )
if __name__ == "__main__":
from doctest import testmod
testmod()
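# Worked examples (sketch, not part of the original file): a conversion is a
# single power-of-ten shift, value * 10 ** (from_exponent - to_exponent),
# e.g. kilometer (exponent 3) -> meter (exponent 0) multiplies by 10 ** 3.
assert length_conversion(4, 'kilometer', 'meter') == 4000
assert length_conversion(1, 'meter', 'kilometer') == 1e-3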
| 697 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE__ = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser( subparsers=None ) -> Union[str, Any]:
if subparsers is not None:
parser = subparsers.add_parser('tpu-config' , description=_description )
else:
parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
config_args = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=tpu_command_launcher )
return parser
return parser
def tpu_command_launcher( args ) -> None:
defaults = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(default_config_file ):
defaults = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
args.command_file = defaults.command_file
if not args.command and defaults.commands is not None:
args.command = defaults.commands
if not args.tpu_name:
args.tpu_name = defaults.tpu_name
if not args.tpu_zone:
args.tpu_zone = defaults.tpu_zone
if args.accelerate_version == "dev":
args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
args.accelerate_version = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , Version ):
args.accelerate_version = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
args.command = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , list ):
args.command = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
new_cmd = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
args.command = '; '.join(new_cmd )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
cmd = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(cmd )}''' )
return
subprocess.run(cmd )
print('Successfully setup pod.' )
def A ( ) -> Optional[Any]:
parser = tpu_command_parser()
args = parser.parse_args()
tpu_command_launcher(args )
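# Example invocation (illustrative only; the flags are the ones defined in
# tpu_command_parser above):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug
#
# With --debug set, the assembled `gcloud compute tpus tpu-vm ssh ...` command
# is printed instead of executed.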
| 9 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
self.processor = DonutProcessor.from_pretrained(CHECKPOINT )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
expected_json = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
sequence = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
actual_json = self.processor.token2json(sequence )
self.assertDictEqual(actual_json , expected_json )
| 697 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
a_ = parent
a_ = out_indices if out_indices is not None else [4]
a_ = stage_names
a_ = out_features
a_ = backbone
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = use_pretrained_backbone
a_ = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
| 697 | 0 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n):
"""Return the prime factorization of n in ascending order."""
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
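# Quick sanity checks (sketch, not part of the original file): trial division
# always divides out the smallest remaining factor, so the result is sorted.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]  # primes factor as themselves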
| 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ : Union[str, Any] = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["""ChineseCLIPFeatureExtractor"""]
lowerCamelCase__ : Optional[int] = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
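# Illustrative note (assumption: this mirrors transformers' _LazyModule
# behaviour): nothing listed in _import_structure is imported until an
# attribute such as `ChineseCLIPModel` is first accessed, so importing the
# package stays cheap and the optional torch/vision branches above only fail
# when those extras are actually used. A minimal, hypothetical sketch:
#
#   class LazyModule(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._class_to_module[name]  # built from _import_structure
#           return getattr(importlib.import_module('.' + submodule, self.__name__), name)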
| 12 |
'''simple docstring'''
import math
def is_prime( number ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution( nth = 1_0_0_0_1 ) -> int:
"""Return the nth prime number."""
try:
nth = int(nth )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
primes = []
num = 2
while len(primes ) < nth:
if is_prime(num ):
primes.append(num )
num += 1
else:
num += 1
return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
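# Quick sanity checks (sketch, not part of the original file): the 6k +/- 1
# loop above only has to test about a third of the candidates below sqrt(number).
assert solution(6) == 13  # primes: 2, 3, 5, 7, 11, 13
assert is_prime(97) and not is_prime(91)  # 91 = 7 * 13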
| 697 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=0.999 , UpperCAmelCase_ : Dict="cosine" , ) -> List[str]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCAmelCase_ : Dict ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCAmelCase_ : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
__lowerCamelCase : List[str] = []
for i in range(UpperCAmelCase_ ):
__lowerCamelCase : Any = i / num_diffusion_timesteps
__lowerCamelCase : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCAmelCase_ ) / alpha_bar_fn(UpperCAmelCase_ ) , UpperCAmelCase_ ) )
return torch.tensor(UpperCAmelCase_ , dtype=torch.float32 )
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = [e.name for e in KarrasDiffusionSchedulers]
lowerCamelCase : List[str] = 2
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_0_0_8_5 , SCREAMING_SNAKE_CASE_ = 0.0_1_2 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ) -> Dict:
if trained_betas is not None:
__lowerCamelCase : Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.float32 )
elif beta_schedule == "linear":
__lowerCamelCase : Union[str, Any] = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase : Any = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase : Optional[int] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
__lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='exp' )
else:
raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' )
__lowerCamelCase : Dict = 1.0 - self.betas
__lowerCamelCase : Dict = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = use_karras_sigmas
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[str]:
if schedule_timesteps is None:
__lowerCamelCase : Dict = self.timesteps
__lowerCamelCase : Any = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase : List[str] = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
__lowerCamelCase : Optional[Any] = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
__lowerCamelCase : Union[str, Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase_ ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> torch.FloatTensor:
__lowerCamelCase : Dict = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = self.sigmas[step_index]
__lowerCamelCase : List[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ) -> str:
__lowerCamelCase : Optional[int] = num_inference_steps
__lowerCamelCase : List[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase : List[Any] = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase : Optional[Any] = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase : List[str] = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowerCamelCase : Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase : Dict = np.log(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
__lowerCamelCase : Any = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
__lowerCamelCase : List[str] = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
__lowerCamelCase : List[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
__lowerCamelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase : str = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
# mps does not support float64
__lowerCamelCase : int = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.float32 )
else:
__lowerCamelCase : Optional[int] = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
__lowerCamelCase : Tuple = None
__lowerCamelCase : str = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase : int = defaultdict(SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# get log sigma
__lowerCamelCase : str = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
__lowerCamelCase : Optional[Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__lowerCamelCase : int = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__lowerCamelCase : Optional[Any] = low_idx + 1
__lowerCamelCase : Optional[Any] = log_sigmas[low_idx]
__lowerCamelCase : Optional[int] = log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase : Union[str, Any] = (low - log_sigma) / (low - high)
__lowerCamelCase : Optional[Any] = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
__lowerCamelCase : List[str] = (1 - w) * low_idx + w * high_idx
__lowerCamelCase : str = t.reshape(sigma.shape )
return t
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> torch.FloatTensor:
__lowerCamelCase : float = in_sigmas[-1].item()
__lowerCamelCase : float = in_sigmas[0].item()
__lowerCamelCase : int = 7.0 # 7.0 is the value used in the paper
__lowerCamelCase : int = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = sigma_min ** (1 / rho)
__lowerCamelCase : List[Any] = sigma_max ** (1 / rho)
__lowerCamelCase : Union[str, Any] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowercase_ ( self ) -> Any:
return self.dt is None
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ) -> Union[SchedulerOutput, Tuple]:
__lowerCamelCase : Union[str, Any] = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
__lowerCamelCase : Any = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase : List[Any] = self.sigmas[step_index]
__lowerCamelCase : Optional[int] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__lowerCamelCase : str = self.sigmas[step_index - 1]
__lowerCamelCase : int = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase : List[str] = 0
__lowerCamelCase : Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_next
__lowerCamelCase : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_next
__lowerCamelCase : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__lowerCamelCase : Any = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
__lowerCamelCase : Any = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase : str = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase : str = sigma_next - sigma_hat
# store for 2nd order step
__lowerCamelCase : str = derivative
__lowerCamelCase : Any = dt
__lowerCamelCase : Any = sample
else:
# 2. 2nd order / Heun's method
__lowerCamelCase : Optional[Any] = (sample - pred_original_sample) / sigma_next
__lowerCamelCase : int = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__lowerCamelCase : Optional[int] = self.dt
__lowerCamelCase : Dict = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__lowerCamelCase : Dict = None
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : int = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase : Optional[int] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
__lowerCamelCase : str = self.timesteps.to(original_samples.device , dtype=torch.float32 )
__lowerCamelCase : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
__lowerCamelCase : Dict = self.timesteps.to(original_samples.device )
__lowerCamelCase : Any = timesteps.to(original_samples.device )
__lowerCamelCase : int = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
__lowerCamelCase : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase : Dict = sigma.unsqueeze(-1 )
__lowerCamelCase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Any:
return self.config.num_train_timesteps
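# A standalone sketch (assumptions: numpy is already imported above as np;
# rho = 7.0 as in the Karras et al. (2022) paper and in _convert_to_karras)
# of the noise schedule used when use_karras_sigmas is enabled:
# sigma_i = (sigma_max**(1/rho) + ramp_i * (sigma_min**(1/rho) - sigma_max**(1/rho))) ** rho
def karras_sigmas(sigma_min, sigma_max, num_steps, rho=7.0):
ramp = np.linspace(0, 1, num_steps)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
# interpolate in 1/rho space, then raise back: decreases from sigma_max to sigma_min
return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho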
| 13 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
self.block_size = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
sequence = [1, 2, 3, 4]
expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
story_lines , summary_lines = process_story(raw_story )
self.assertEqual(summary_lines , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
raw_story = ''
story_lines , summary_lines = process_story(raw_story )
self.assertEqual(story_lines , [] )
self.assertEqual(summary_lines , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
raw_story = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
story_lines , summary_lines = process_story(raw_story )
expected_story_lines = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(story_lines , expected_story_lines )
expected_summary_lines = ['It was the best of times.']
self.assertEqual(summary_lines , expected_summary_lines )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = torch.tensor([1, 2, 3, 4] )
a_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = 101
a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' ,[False, True] )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : int ) -> Tuple:
"""simple docstring"""
_a : str = tmp_path / '''cache'''
_a : List[Any] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : List[Any] = TextDatasetReader(__a ,cache_dir=__a ,keep_in_memory=__a ).read()
_check_text_dataset(__a ,__a )
@pytest.mark.parametrize(
'''features''' ,[
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] ,)
def __UpperCAmelCase ( __a : Optional[int] ,__a : Optional[Any] ,__a : int ) -> List[str]:
"""simple docstring"""
_a : Dict = tmp_path / '''cache'''
_a : Optional[Any] = {'''text''': '''string'''}
_a : Optional[Any] = features.copy() if features else default_expected_features
_a : Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : List[str] = TextDatasetReader(__a ,features=__a ,cache_dir=__a ).read()
_check_text_dataset(__a ,__a )
@pytest.mark.parametrize('''split''' ,[None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCAmelCase ( __a : str ,__a : int ,__a : List[str] ) -> List[Any]:
"""simple docstring"""
_a : Any = tmp_path / '''cache'''
_a : List[str] = {'''text''': '''string'''}
_a : str = TextDatasetReader(__a ,cache_dir=__a ,split=__a ).read()
_check_text_dataset(__a ,__a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' ,[str, list] )
def __UpperCAmelCase ( __a : int ,__a : int ,__a : str ) -> Optional[Any]:
"""simple docstring"""
if issubclass(__a ,__a ):
_a : Tuple = text_path
elif issubclass(__a ,__a ):
_a : Tuple = [text_path]
_a : Union[str, Any] = tmp_path / '''cache'''
_a : Any = {'''text''': '''string'''}
_a : int = TextDatasetReader(__a ,cache_dir=__a ).read()
_check_text_dataset(__a ,__a )
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' ,[False, True] )
def __UpperCAmelCase ( __a : int ,__a : int ,__a : Optional[Any] ) -> Any:
"""simple docstring"""
_a : List[str] = tmp_path / '''cache'''
_a : List[str] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : Union[str, Any] = TextDatasetReader({'''train''': text_path} ,cache_dir=__a ,keep_in_memory=__a ).read()
_check_text_datasetdict(__a ,__a )
@pytest.mark.parametrize(
'''features''' ,[
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] ,)
def __UpperCAmelCase ( __a : Dict ,__a : List[str] ,__a : Optional[int] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = tmp_path / '''cache'''
    # The text builder always produces a single "text" column with string dtype
_a : Optional[Any] = {'''text''': '''string'''}
_a : str = features.copy() if features else default_expected_features
_a : Dict = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : Union[str, Any] = TextDatasetReader({'''train''': text_path} ,features=__a ,cache_dir=__a ).read()
_check_text_datasetdict(__a ,__a )
@pytest.mark.parametrize('''split''' ,[None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCAmelCase ( __a : int ,__a : int ,__a : Dict ) -> Dict:
"""simple docstring"""
if split:
_a : Union[str, Any] = {split: text_path}
else:
_a : Any = '''train'''
_a : Any = {'''train''': text_path, '''test''': text_path}
_a : List[str] = tmp_path / '''cache'''
_a : Union[str, Any] = {'''text''': '''string'''}
_a : Tuple = TextDatasetReader(__a ,cache_dir=__a ).read()
_check_text_datasetdict(__a ,__a ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 14 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
| 697 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A : Any = logging.getLogger(__name__)
A : int = 'Hello world! cécé herlolip'
A : Optional[int] = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : List[str] ) -> str:
"""simple docstring"""
lowercase__ = BertAbsConfig(
temp_dir=""".""" , finetune_bert=__magic_name__ , large=__magic_name__ , share_emb=__magic_name__ , use_bert_emb=__magic_name__ , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
lowercase__ = torch.load(__magic_name__ , lambda __magic_name__ , __magic_name__ : storage )
lowercase__ = AbsSummarizer(__magic_name__ , torch.device("""cpu""" ) , __magic_name__ )
original.eval()
lowercase__ = BertAbsSummarizer(__magic_name__ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
lowercase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
lowercase__ = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__magic_name__ )) )
lowercase__ = torch.tensor(__magic_name__ ).unsqueeze(0 )
lowercase__ = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__magic_name__ )) )
lowercase__ = torch.tensor(__magic_name__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
lowercase__ = encoder_input_ids
lowercase__ = decoder_input_ids
lowercase__ = lowercase__ = None
lowercase__ = None
lowercase__ = lowercase__ = None
lowercase__ = lowercase__ = None
lowercase__ = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowercase__ = original(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0]
lowercase__ = original.generator(__magic_name__ )
lowercase__ = new_model(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0]
lowercase__ = new_model.generator(__magic_name__ )
lowercase__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__magic_name__ ) )
lowercase__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__magic_name__ ) )
lowercase__ = torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
        help='Path to the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
A : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 15 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 0 |
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid : high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
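
# Added sanity checks (illustrative values): two merge passes (p = 2, then the
# final merge) fully sort four elements; trivial input is returned unchanged.
assert iter_merge_sort([9, 2, 7, 4]) == [2, 4, 7, 9]
assert iter_merge_sort([1]) == [1]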
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 16 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
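
# Added usage sketch (not part of the original module): instantiate the config
# and inspect the ONNX export spec defined above; printed values reflect the
# defaults set in ViTConfig.
if __name__ == "__main__":
    config = ViTConfig()
    onnx_config = ViTOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', ...})])
    print(onnx_config.atol_for_validation)  # 1e-4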
| 697 | 0 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : Dict = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
def __init__( self : Optional[Any] , __A : Optional[Dict] = None , __A : int = 256 , __A : int = 256 , __A : int = 256 , __A : int = 1024 , __A : str = "relu" , __A : int = 6 , __A : int = 10 , __A : int = 8 , __A : float = 0.0 , __A : int = 2048 , __A : bool = False , __A : bool = False , __A : int = 4 , __A : int = 255 , __A : int = 100 , __A : float = 0.1 , __A : float = 2.0 , __A : float = 5.0 , __A : float = 5.0 , __A : int = 1_2544 , __A : float = 3.0 , __A : float = 0.7_5 , __A : float = 0.0_2 , __A : float = 1.0 , __A : bool = True , __A : List[int] = [4, 8, 16, 32] , __A : bool = None , **__A : Dict , ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
__A : List[Any] = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__A , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__A , __A ):
__A : Any = backbone_config.pop("""model_type""" )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : str = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
__A : int = backbone_config
__A : Optional[Any] = feature_size
__A : Union[str, Any] = mask_feature_size
__A : List[str] = hidden_dim
__A : Union[str, Any] = encoder_feedforward_dim
__A : int = activation_function
__A : Any = encoder_layers
__A : str = decoder_layers
__A : List[str] = num_attention_heads
__A : Tuple = dropout
__A : Tuple = dim_feedforward
__A : Optional[int] = pre_norm
__A : Optional[Any] = enforce_input_projection
__A : Any = common_stride
__A : Any = ignore_value
__A : List[str] = num_queries
__A : List[str] = no_object_weight
__A : str = class_weight
__A : Any = mask_weight
__A : Dict = dice_weight
__A : Optional[Any] = train_num_points
__A : str = oversample_ratio
__A : str = importance_sample_ratio
__A : List[str] = init_std
__A : Any = init_xavier_std
__A : Any = use_auxiliary_loss
__A : Optional[int] = feature_strides
__A : List[str] = output_auxiliary_logits
__A : List[str] = decoder_layers
super().__init__(**__A )
@classmethod
def lowerCAmelCase_ ( cls : str , __A : PretrainedConfig , **__A : int ):
return cls(
backbone_config=__A , **__A , )
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Optional[int] = copy.deepcopy(self.__dict__ )
__A : Any = self.backbone_config.to_dict()
__A : Optional[int] = self.__class__.model_type
return output
| 17 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be filled."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
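    # Added check: for a row of length 7 this yields 17, the count quoted in
    # Project Euler problem 114 (blocks of length >= 3, separated by >= 1 gap).
    assert solution(7) == 17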
| 697 | 0 |
"""Lempel-Ziv style compression of binary files."""
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file and return its bytes as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given bit string and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (Elias gamma coded) to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given string of 0's and 1's as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it and write the result to the destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 18 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: the output is 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
    print('Truth Table of NOR Gate:')
    print('| Input 1 | Input 2 | Output |')
    print(f'''| 0 | 0 | {nor_gate(0 , 0 )} |''')
    print(f'''| 0 | 1 | {nor_gate(0 , 1 )} |''')
    print(f'''| 1 | 0 | {nor_gate(1 , 0 )} |''')
    print(f'''| 1 | 1 | {nor_gate(1 , 1 )} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 19 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models')
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir')
        download_parser.add_argument(
            '--trust-remote-code', action='store_true', help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine', )
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
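
# Added usage note (invocation is an assumption based on the transformers CLI
# conventions; the subcommand and flags shown are the ones registered above):
#     transformers-cli download bert-base-uncased --cache-dir /tmp/models --force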
| 697 | 0 |
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends with the number itself."""
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
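
    # Added spot checks: 5 -> 25 and 76 -> 5776 both end with the original
    # number, while 7 -> 49 does not.
    assert is_automorphic_number(5)
    assert is_automorphic_number(76)
    assert not is_automorphic_number(7)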
| 20 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
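
    # Added examples: "A" -> 1, "AB" -> 1 * 26 + 2 = 28, "ZY" -> 26 * 26 + 25 = 701.
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("AB") == 28
    assert excel_title_to_column("ZY") == 701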
| 697 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self :Any , __snake_case :Optional[int]=5_02_65 , __snake_case :Any=5_12 , __snake_case :Tuple=8 , __snake_case :Optional[Any]=20_48 , __snake_case :List[Any]=16 , __snake_case :Any=8 , __snake_case :Union[str, Any]=20_48 , __snake_case :Any=16 , __snake_case :List[str]=0.0 , __snake_case :Dict=0.0 , __snake_case :str=True , __snake_case :Optional[int]=True , __snake_case :Optional[int]="gelu" , __snake_case :Dict=5_12 , __snake_case :Optional[Any]=0.1 , __snake_case :Tuple=0.0 , __snake_case :Optional[Any]=0.0 , __snake_case :Optional[int]=0.02 , __snake_case :Optional[int]=1 , __snake_case :str=False , __snake_case :List[Any]=0 , __snake_case :int=1 , __snake_case :List[Any]=2 , __snake_case :Optional[Any]=2 , **__snake_case :str , ):
'''simple docstring'''
__magic_name__ : int =vocab_size
__magic_name__ : Optional[Any] =max_position_embeddings
__magic_name__ : Optional[Any] =d_model
__magic_name__ : str =encoder_ffn_dim
__magic_name__ : Tuple =encoder_layers
__magic_name__ : List[str] =encoder_attention_heads
__magic_name__ : Union[str, Any] =decoder_ffn_dim
__magic_name__ : int =decoder_layers
__magic_name__ : Tuple =decoder_attention_heads
__magic_name__ : Tuple =dropout
__magic_name__ : List[str] =attention_dropout
__magic_name__ : int =activation_dropout
__magic_name__ : Union[str, Any] =activation_function
__magic_name__ : Any =init_std
__magic_name__ : Any =encoder_layerdrop
__magic_name__ : Optional[int] =decoder_layerdrop
__magic_name__ : int =use_cache
__magic_name__ : Dict =encoder_layers
__magic_name__ : Tuple =scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def A__ ( self :Any ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : List[Any] =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__magic_name__ : Tuple ={0: """batch"""}
__magic_name__ : Tuple ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__magic_name__ : Optional[Any] ={0: """batch""", 1: """decoder_sequence"""}
__magic_name__ : Union[str, Any] ={0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__magic_name__ : Optional[Any] =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__magic_name__ , __magic_name__ : List[Any] =self.num_layers
for i in range(__snake_case ):
__magic_name__ : Optional[int] ={0: """batch""", 2: """past_sequence + sequence"""}
__magic_name__ : str ={0: """batch""", 2: """past_sequence + sequence"""}
else:
__magic_name__ : Dict =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def A__ ( self :Any ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : List[Any] =super().outputs
else:
__magic_name__ : Optional[Any] =super(__snake_case , self ).outputs
if self.use_past:
__magic_name__ , __magic_name__ : int =self.num_layers
for i in range(__snake_case ):
__magic_name__ : Optional[int] ={0: """batch""", 2: """past_sequence + sequence"""}
__magic_name__ : str ={0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def A__ ( self :List[Any] , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ):
'''simple docstring'''
__magic_name__ : str =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Generate decoder inputs
__magic_name__ : List[str] =seq_length if not self.use_past else 1
__magic_name__ : Any =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
__magic_name__ : Any ={f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__magic_name__ : List[str] =dict(**__snake_case , **__snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ , __magic_name__ : int =common_inputs["""input_ids"""].shape
__magic_name__ : Any =common_inputs["""decoder_input_ids"""].shape[1]
__magic_name__ , __magic_name__ : Optional[int] =self.num_attention_heads
__magic_name__ : int =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : List[Any] =decoder_seq_length + 3
__magic_name__ : List[Any] =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__magic_name__ : Any =torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__snake_case , __snake_case )] , dim=1 )
__magic_name__ : Optional[Any] =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__magic_name__ , __magic_name__ : Dict =self.num_layers
__magic_name__ : List[Any] =min(__snake_case , __snake_case )
__magic_name__ : List[Any] =max(__snake_case , __snake_case ) - min_num_layers
__magic_name__ : Optional[Any] ="""encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
) )
# TODO: test this.
__magic_name__ : int =encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__snake_case , __snake_case ):
common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) )
return common_inputs
def A__ ( self :List[str] , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ , __magic_name__ : int =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__magic_name__ : int =seqlen + 2
__magic_name__ , __magic_name__ : Tuple =self.num_layers
__magic_name__ , __magic_name__ : List[str] =self.num_attention_heads
__magic_name__ : List[Any] =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : Union[str, Any] =common_inputs["""attention_mask"""].dtype
__magic_name__ : int =torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 )
__magic_name__ : Dict =[
(torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case )
]
return common_inputs
def A__ ( self :Any , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ):
'''simple docstring'''
__magic_name__ : Tuple =compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__magic_name__ : Any =tokenizer.num_special_tokens_to_add(__snake_case )
__magic_name__ : Optional[Any] =compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case )
# Generate dummy inputs according to compute batch and sequence
__magic_name__ : List[str] =[""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__magic_name__ : int =dict(tokenizer(__snake_case , return_tensors=__snake_case ) )
return common_inputs
def A__ ( self :Optional[Any] , __snake_case :PreTrainedTokenizer , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Any =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
elif self.task == "causal-lm":
__magic_name__ : List[Any] =self._generate_dummy_inputs_for_causal_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
else:
__magic_name__ : int =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
return common_inputs
def A__ ( self :List[str] , __snake_case :Any , __snake_case :Dict , __snake_case :Any , __snake_case :Any ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Union[str, Any] =super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case )
else:
__magic_name__ : Optional[Any] =super(__snake_case , self )._flatten_past_key_values_(
__snake_case , __snake_case , __snake_case , __snake_case )
| 21 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase =logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
"""simple docstring"""
_UpperCamelCase = ["audio_values", "audio_mask"]
    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', ).T
    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
# always return batch
if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
# Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
# convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
# return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
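
# --- Added usage sketch (not part of the original file) ----------------------
# Assuming this is transformers' TvltFeatureExtractor (the method bodies above
# match it), a single 1-second clip sampled at 44.1 kHz would be processed as:
#     import numpy as np
#     extractor = TvltFeatureExtractor()
#     audio = np.random.randn(44100).astype(np.float32)
#     batch = extractor(audio, sampling_rate=44100, return_tensors="np")
#     print(batch["audio_values"].shape)  # (1, 1, time, 128)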
| 697 | 0 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into even byte ranges, one per partition."""
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''')
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not > number_of_bytes!''')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
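
    # Added example: 16 bytes split across 4 partitions gives four even ranges.
    assert allocation_num(16, 4) == ["1-4", "5-8", "9-12", "13-16"]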
| 22 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Jaccard similarity: intersection size over union size."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
__lowerCAmelCase ={"a", "b", "c", "d", "e"}
__lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
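    # Added check: the intersection {"c", "d", "e"} has 3 elements and the
    # union has 8, so the similarity printed above is 3 / 8 = 0.375.
    assert jaccard_similarity(set_a, set_b) == 3 / 8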
| 697 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""")
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 23 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
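
# Added example (each emitted token is followed by a space, as built above):
assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "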
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase_ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    """Translate a Pegasus TF variable name into the corresponding HF state dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def _UpperCamelCase (_lowerCamelCase : dict , _lowerCamelCase : dict )-> PegasusForConditionalGeneration:
'''simple docstring'''
__snake_case = DEFAULTS.copy()
cfg_kwargs.update(_lowerCamelCase )
__snake_case = PegasusConfig(**_lowerCamelCase )
__snake_case = PegasusForConditionalGeneration(_lowerCamelCase )
__snake_case = torch_model.model.state_dict()
__snake_case = {}
for k, v in tf_weights.items():
__snake_case = rename_state_dict_key(_lowerCamelCase )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
__snake_case = v.T
__snake_case = torch.tensor(_lowerCamelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
__snake_case = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
__snake_case = mapping['''shared.weight''']
__snake_case = mapping['''shared.weight''']
__snake_case = {k: torch.zeros_like(_lowerCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**_lowerCamelCase )
__snake_case , __snake_case = torch_model.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
__snake_case = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase (_lowerCamelCase : int="./ckpt/aeslc/model.ckpt-32000" )-> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(_lowerCamelCase )
__snake_case = {}
__snake_case = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(_lowerCamelCase , desc='''converting tf checkpoint to dict''' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
__snake_case = array
return tf_weights
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : str )-> List[Any]:
'''simple docstring'''
__snake_case = Path(_lowerCamelCase ).parent.name
__snake_case = task_specific_params[f'''summarization_{dataset}''']['''max_position_embeddings''']
__snake_case = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=_lowerCamelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_lowerCamelCase )
# convert model
__snake_case = get_tf_weights_as_numpy(_lowerCamelCase )
__snake_case = task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
__snake_case = task_specific_params
__snake_case = convert_pegasus(_lowerCamelCase , _lowerCamelCase )
torch_model.save_pretrained(_lowerCamelCase )
__snake_case = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(_lowerCamelCase , Path(_lowerCamelCase ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCAmelCase_ : str = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase_ : Optional[int] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase_ : Tuple = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
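    # Added usage note (paths are illustrative; the first argument is the TF
    # checkpoint, the second the output directory, defaulting to pegasus/<dataset>):
    #     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc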
| 24 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| 697 | 0 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 25 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
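# Quick sanity check (assumption: standard Koch construction): every iteration
# turns each segment into four, so after k steps the list holds 3 * 4**k + 1 points.
#
#   >>> len(iterate(INITIAL_VECTORS, 2)) == 3 * 4**2 + 1
#   True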
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 697 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        """simple docstring"""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
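# Hedged usage sketch (not part of the original module): attribute_map means the
# generic names resolve to the GPT-2-style ones.
#
#   >>> config = DecisionTransformerConfig(state_dim=11, act_dim=3)
#   >>> config.num_hidden_layers == config.n_layer
#   True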
| 26 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """simple docstring"""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str) -> int:
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]) -> List[str]:
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    """simple docstring"""
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
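# Hedged sketch of the output format (not from the original file): each ref line is a
# JSON list of sub-token positions that continue a Chinese whole word, later consumed
# for whole-word masking. If the BERT tokens of a line are
# ['[CLS]', '天', '气', '好', '[SEP]'] and LTP segments '天气' as one word, the ref
# line would be [2], marking '气' as the continuation of '天气'.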
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 697 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    '''simple docstring'''

    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
        text_encoder = CLIPTextModel(text_encoder_config )

        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
        tokenizer.model_max_length = 77

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def test_alt_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()

        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components['text_encoder'] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        inputs['prompt'] = 'A photo of an astronaut'
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_alt_diffusion_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components['text_encoder'] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )

        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='numpy' )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 27 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
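# Hedged example (not from the original file): for a sentence pair, token_type_ids
# are 0 over [CLS] A [SEP] and 1 over B [SEP]. With len(A) == 2 and len(B) == 3,
# create_token_type_ids_from_sequences returns [0, 0, 0, 0, 1, 1, 1, 1].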
| 697 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
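# Hedged usage note (not in the original file): with the lazy module installed in
# sys.modules, `from transformers.models.bartpho import BartphoTokenizer` only
# imports tokenization_bartpho (and therefore sentencepiece) on first access.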
| 28 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 697 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"] )
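# Behavioral note with a hedged example (not in the original file): instantiating
# the dummy fails fast with an ImportError telling the user to install keras_nlp,
# e.g. TFGPT2Tokenizer() raises immediately instead of breaking later at call time.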
| 29 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
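# Hedged usage sketch (the grid values are made up; 0 = open cell, 1 = wall):
#
#   >>> maze = [[0, 1, 0],
#   ...         [0, 0, 0],
#   ...         [1, 0, 0]]
#   >>> solve_maze(maze)  # prints the 0/1 path matrix and returns True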
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 0 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    '''simple docstring'''
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
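# Hedged usage sketch (grid values are made up; 1 = walkable, 0 = blocked):
#
#   >>> grid = np.array([[1, 1, 1],
#   ...                  [0, 0, 1],
#   ...                  [1, 1, 1]])
#   >>> dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
#   >>> dist
#   6.0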
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 30 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid 'from_type' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid 'to_type' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
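# Worked example: 'kilometer' maps to symbol 'km' with exponent 3 and 'meter' to
# 'm' with exponent 0, so converting 4 km to m multiplies by 10**3.
#
#   >>> length_conversion(4, 'kilometer', 'meter')
#   4000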
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 0 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
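# Worked example on a made-up digit string: for n = '12345' a 2-digit window would
# reduce to the products 1*2, 2*3, 3*4, 4*5 and max(...) would be 20; the task above
# uses 13-digit windows over the 1000-digit constant N.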
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 31 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
CHECKPOINT = "naver-clova-ix/donut-base"
class DonutProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(CHECKPOINT)
    def test_token2json(self):
        expected_json = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
        sequence = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 697 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'''can't find {path}''' )
    return results
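# Hedged sketch of the file this reads (keys vary per example script; values made up):
#
#   {"eval_accuracy": 0.75, "train_loss": 0.45}
#
# i.e. whatever metrics the corresponding `*_no_trainer.py` script dumped to
# all_results.json in its output directory.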
def is_cuda_and_apex_available():
    """simple docstring"""
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result['''perplexity'''] , 100 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result['''perplexity'''] , 42 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
        self.assertLess(result['''train_loss'''] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['''eval_f1'''] , 28 )
        self.assertGreaterEqual(result['''eval_exact'''] , 28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
        self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
        self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
        self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''translation_no_trainer''' ) ) )
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''step_1''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''image_classification_no_trainer''' ) ) )
| 32 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone='resnet50',
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )

        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )

        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
    def test_hidden_states_output(self):
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side' )
    def test_initialization(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def test_inputs_embeds(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def test_model_common_attributes(self):
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_save_load(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def test_tie_model_weights(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
    def test_channels(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
    def test_attention_outputs(self):
        pass

    @unittest.skip('Safetensors is not supported by timm.' )
    def test_can_use_safetensors(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )

        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True )

        self.assertIsNotNone(hidden_states.grad )

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )

            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )

            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
| 697 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        images = kwargs.pop('''images''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )

        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r'''<s_(.*?)>''' , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(rF"""</s_{key}>""" , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , '''''' )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r'''<sep/>''' ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:] , is_inner_value=is_inner_value , added_vocab=added_vocab )

        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
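# Hedged example of the parse (output shape follows from token2json above; the tag
# names are made up):
#
#   >>> processor.token2json('<s_menu><s_nm>Latte</s_nm></s_menu>')
#   {'menu': {'nm': 'Latte'}}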
| 33 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self )

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'apply_ocr' ) )

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )

        image = Image.open(ds[0]['file'] ).convert('RGB' )

        encoding = image_processing(image , return_tensors='pt' )

        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
        # with apply_ocr = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
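# Illustrative sketch (editor's addition, not part of the test above): LayoutLM-style
# processors return OCR boxes normalized to a 0-1000 coordinate grid, which is the
# convention behind the box values asserted in the OCR test. `normalize_box` below
# mirrors that convention; it is a sketch, not the library's actual helper.
def normalize_box(box, width, height):
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]


assert normalize_box([100, 50, 200, 100], 1000, 500) == [100, 100, 200, 200]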
| 697 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 |
'''simple docstring'''
import math
def is_prime( number: int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
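# Editor's cross-check (illustrative addition, not part of the original file): the
# 6k +/- 1 shortcut above should agree with naive trial division for small inputs;
# `_naive_is_prime` is a hypothetical reference implementation used only here.
def _naive_is_prime(n: int) -> bool:
    return n > 1 and all(n % d != 0 for d in range(2, int(math.sqrt(n)) + 1))


assert all(is_prime(k) == _naive_is_prime(k) for k in range(2, 100))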
def solution( nth: int = 1_0_0_0_1 ) -> int:
    """simple docstring"""
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 0 |
from __future__ import annotations
def pigeon_sort( array: list[int] ) -> list[int]:
    '''simple docstring'''
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the number of holes needed to cover the value range
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Fill the holes: record each value and how often it occurs
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Rebuild the array in sorted order from the holes
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Return the sorted array
    return array
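# Editor's worked example (illustrative addition): sorting [7, 2, 7, 4] builds holes
# for the value range 2..7 (holes_range = 6); the value 7 lands in hole index 5 with
# a repeat count of 2, so it appears twice at the end of the rebuilt array.
assert pigeon_sort([7, 2, 7, 4]) == [2, 4, 7, 7]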
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n')
    unsorted = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 35 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = [1, 2, 3, 4]
a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = ''
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
a_ , a_ = process_story(UpperCAmelCase__ )
a_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = ['It was the best of times.']
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = torch.tensor([1, 2, 3, 4] )
a_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = 101
a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model( model , input , target , accelerator , do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup( accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync( accelerator ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targets for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync( accelerator ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targets for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation( split_batches=False , dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targets for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False , dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targets for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def _mp_fn( index ):
    '''simple docstring'''
    main()
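# Editor's sketch (illustrative addition, assuming the helpers above): the core pattern
# these tests exercise. Under `accelerator.accumulate(model)`, gradient synchronization
# is skipped on intermediate micro-steps and performed only on the step that completes
# an accumulation window; all names below are illustrative.
def sketch_accumulation_loop(accelerator, model, optimizer, dataloader):
    model.train()
    for batch, target in dataloader:
        with accelerator.accumulate(model):
            loss = F.mse_loss(model(batch), target)
            accelerator.backward(loss)  # grads sync only on the final micro-step
            optimizer.step()
            optimizer.zero_grad()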
if __name__ == "__main__":
main()
| 36 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
| 697 | 0 |
def solution( limit: int = 1_000_000 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
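# Editor's note on why `common_difference` works: write the progression as
# x = a + d, y = a, z = a - d. Then x**2 - y**2 - z**2 = 4*a*d - a**2 = a * (4*d - a) = n,
# so in the inner loop `first_term + n / first_term` equals a + n/a = 4*d, which is why it
# must be divisible by 4, and positivity of x, y, z gives d < a < 4*d. A quick check:
a, d = 20, 9
x, y, z = a + d, a, a - d
assert x**2 - y**2 - z**2 == a * (4 * d - a) == 320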
if __name__ == "__main__":
print(f"""{solution() = }""")
| 37 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 0 |
'''simple docstring'''
def solution() -> int:
    '''simple docstring'''
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = "".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
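# Editor's example (illustrative addition): the constant starts "123456789101112...",
# so the 12th digit (index 11) is the leading '1' of 11.
_digits = "".join(str(i) for i in range(1, 100))
assert _digits[11] == "1"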
if __name__ == "__main__":
print(solution())
| 38 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "vit"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
super().__init__(**UpperCAmelCase__ )
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = encoder_stride
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
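# Editor's note (illustrative addition): with the defaults above (image_size=224,
# patch_size=16), a ViT encoder sees (224 // 16) ** 2 = 196 patch tokens plus one
# [CLS] token, i.e. a sequence length of 197.
assert (224 // 16) ** 2 + 1 == 197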
| 697 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase_ = open # noqa: we just need to have a builtin inside this module to test it properly | 39 |
'''simple docstring'''
def solution( length: int = 5_0 ) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 0 |
def print_max_activities( start: list[int] , finish: list[int] ) -> None:
    n = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    i = 0
    print(i , end=',' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
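# Editor's sketch (illustrative addition): the greedy rule above assumes activities are
# sorted by finish time; always taking the earliest-finishing compatible activity leaves
# maximal room for the rest. A list-returning variant makes the selection testable:
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]
    for j in range(1, len(finish)):
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected


assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]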
| 40 |
'''simple docstring'''
def nor_gate( input_a: int , input_b: int ) -> int:
    """simple docstring"""
    return int(input_a == input_b == 0 )
def main() -> None:
"""simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowercase__ : int ,lowercase__ : Any=7 ,lowercase__ : Optional[Any]=3 ,lowercase__ : Any=1_8 ,lowercase__ : Optional[Any]=3_0 ,lowercase__ : Any=4_0_0 ,lowercase__ : str=True ,lowercase__ : Union[str, Any]=3_2 ,lowercase__ : Tuple=True ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size_divisor
__lowercase = do_rescale
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = GLPNImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = GLPNImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ ,'''do_resize''' ) )
self.assertTrue(hasattr(lowercase__ ,'''size_divisor''' ) )
self.assertTrue(hasattr(lowercase__ ,'''resample''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_rescale''' ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,Image.Image )
        # Test non-batched input (GLPNImageProcessor doesn't support batching)
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,np.ndarray )
        # Test non-batched input (GLPNImageProcessor doesn't support batching)
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE ( self : Dict ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
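# Editor's note (illustrative arithmetic, not GLPN's actual resize code): the
# size_divisor contract asserted above amounts to flooring each spatial dimension
# to a multiple of size_divisor, e.g. a height of 475 with size_divisor=32:
assert (475 // 32) * 32 == 448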
| 41 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class _snake_case ( snake_case ):
"""simple docstring"""
@staticmethod
def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str:
a_ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' )
download_parser.set_defaults(func=UpperCAmelCase__ )
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = model
a_ = cache
a_ = force
a_ = trust_remote_code
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 697 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch( openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ) -> None:
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
A_ = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 42 |
'''simple docstring'''
def excel_title_to_column( column_title: str ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 6_4) * pow(2_6 , power )
        answer += value
        power += 1
        index -= 1
    return answer
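# Editor's worked example (illustrative addition): "AB" decodes as
# 1 * 26**1 + 2 * 26**0 = 28, and a single "Z" is simply 26.
assert excel_title_to_column("AB") == 28
assert excel_title_to_column("Z") == 26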
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}
SPIECE_UNDERLINE = '▁'
class _a ( UpperCamelCase__ ):
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
_lowercase : Tuple = CamembertTokenizer
def __init__( self: List[Any] , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Dict=None , UpperCamelCase_: str="<s>" , UpperCamelCase_: Any="</s>" , UpperCamelCase_: Dict="</s>" , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Tuple="<pad>" , UpperCamelCase_: Optional[Any]="<mask>" , UpperCamelCase_: Tuple=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCamelCase_: Union[str, Any] , ) -> List[Any]:
"""simple docstring"""
lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowerCamelCase_ ( self: int , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
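# Editor's illustration (hypothetical ids, for demonstration only): the pair layout
# built by the first method above follows RoBERTa's double-separator convention,
# <s> A </s></s> B </s>.
_cls, _sep = 5, 6  # hypothetical token ids
_a, _b = [10, 11], [12]
assert [_cls] + _a + [_sep] + [_sep] + _b + [_sep] == [5, 10, 11, 6, 6, 12, 6]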
| 43 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = ["audio_values", "audio_mask"]
def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]:
super().__init__(
feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = spectrogram_length
a_ = num_channels
a_ = patch_size
a_ = feature_size // self.patch_size[1]
a_ = n_fft
a_ = sampling_rate // hop_length_to_sampling_rate
a_ = sampling_rate
a_ = padding_value
a_ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray:
a_ = spectrogram(
UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
a_ = log_spec[:, :-1]
a_ = log_spec - 2_0.0
a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
a_ = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
a_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a_ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a_ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCAmelCase__ ):
a_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
a_ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
a_ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
a_ = np.array(UpperCAmelCase__ ).astype(np.floataa )
# convert into correct format for padding
a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
a_ = padded_audio_features * self.padding_value
for i in range(len(UpperCAmelCase__ ) ):
a_ = audio_features[i]
a_ = feature
# return as BatchFeature
if return_attention_mask:
a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
a_ = {'audio_values': padded_audio_features}
a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
return encoded_inputs
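# Editor's note on the normalization in `_np_extract_fbank_features` above: after the
# -20 dB shift, dividing by 40 and clipping to [-2, 0] before adding 1 maps the kept
# 80 dB dynamic range onto [-1, 1].
_demo_db = np.array([-100.0, -60.0, -20.0, 0.0])  # shifted dB values
assert np.allclose(np.clip(_demo_db / 40.0, -2.0, 0.0) + 1.0, [-1.0, -0.5, 0.5, 1.0])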
| 697 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False
def A_ ( _lowerCAmelCase : Namespace ):
"""simple docstring"""
return TrainCommand(_lowerCAmelCase )
class UpperCAmelCase__ ( A ):
@staticmethod
def lowerCamelCase_ ( __A : ArgumentParser ):
_lowerCamelCase : int = parser.add_parser("train",help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data",type=__A,required=__A,help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",)
train_parser.add_argument(
"--column_label",type=__A,default=0,help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text",type=__A,default=1,help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id",type=__A,default=2,help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row",action="store_true",help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data",type=__A,default="",help="path to validation dataset." )
train_parser.add_argument(
"--validation_split",type=__A,default=0.1,help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",)
train_parser.add_argument("--output",type=__A,default="./",help="path to saved the trained model." )
train_parser.add_argument(
"--task",type=__A,default="text_classification",help="Task to train the model on." )
train_parser.add_argument(
"--model",type=__A,default="bert-base-uncased",help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size",type=__A,default=3_2,help="Batch size for training." )
train_parser.add_argument("--valid_batch_size",type=__A,default=6_4,help="Batch size for validation." )
train_parser.add_argument("--learning_rate",type=__A,default=3e-5,help="Learning rate." )
train_parser.add_argument("--adam_epsilon",type=__A,default=1e-08,help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=__A )
def __init__( self : List[Any],__A : Namespace ):
_lowerCamelCase : str = logging.get_logger("transformers-cli/training" )
_lowerCamelCase : Tuple = "tf" if is_tf_available() else "torch"
os.makedirs(args.output,exist_ok=__A )
_lowerCamelCase : Dict = args.output
_lowerCamelCase : List[str] = args.column_label
_lowerCamelCase : Tuple = args.column_text
_lowerCamelCase : Tuple = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
_lowerCamelCase : int = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
_lowerCamelCase : Optional[int] = Processor.create_from_csv(
args.train_data,column_label=args.column_label,column_text=args.column_text,column_id=args.column_id,skip_first_row=args.skip_first_row,)
_lowerCamelCase : Any = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
_lowerCamelCase : int = Processor.create_from_csv(
args.validation_data,column_label=args.column_label,column_text=args.column_text,column_id=args.column_id,skip_first_row=args.skip_first_row,)
_lowerCamelCase : Union[str, Any] = args.validation_split
_lowerCamelCase : Any = args.train_batch_size
_lowerCamelCase : Optional[int] = args.valid_batch_size
_lowerCamelCase : List[Any] = args.learning_rate
_lowerCamelCase : Union[str, Any] = args.adam_epsilon
def lowerCamelCase_ ( self : List[str] ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def lowerCamelCase_ ( self : int ):
raise NotImplementedError
def lowerCamelCase_ ( self : Optional[Any] ):
self.pipeline.fit(
self.train_dataset,validation_data=self.valid_dataset,validation_split=self.validation_split,learning_rate=self.learning_rate,adam_epsilon=self.adam_epsilon,train_batch_size=self.train_batch_size,valid_batch_size=self.valid_batch_size,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 44 |
'''simple docstring'''
def jaccard_similarity( set_a , set_b , alternative_union=False ):
    """simple docstring"""
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
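# Editor's worked example (illustrative addition): for A = {a, b, c, d, e} and
# B = {c, d, e, f, h, i}, the intersection has 3 elements and the union has 8,
# so the similarity is 3/8.
assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375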
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 697 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function: str , starting_point: complex , variable: str = "x" , precision: float = 10**-10 , multiplicity: int = 1 , ) -> complex:
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("""Could not find root""" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''') | 45 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
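    # Illustrative call (added for clarity; not part of the original module).
    # fizz_buzz builds one space-separated string instead of printing line by line:
    print(fizz_buzz(1, 15))  # "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "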
| 697 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    '''simple docstring'''
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            " function.")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"""val_{metric}""", mode="max", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    '''simple docstring'''
    return EarlyStopping(
        monitor=f"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        lrs = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        '''simple docstring'''
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        '''simple docstring'''
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
@rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 46 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """simple docstring"""

    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs, ) -> None:
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
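# Illustrative usage sketch (added for clarity; not part of the original module):
#
#     def gen():
#         yield {"text": "hello"}
#         yield {"text": "world"}
#
#     ds = GeneratorDatasetInputStream(generator=gen).read()
#
# This mirrors what `datasets.Dataset.from_generator` does internally.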
| 697 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = '''xlnet'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''', # Backward compatibility
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''')
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''')
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.', FutureWarning, )
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        '''simple docstring'''
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        '''simple docstring'''
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
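# Illustrative usage sketch (added for clarity; not part of the original module):
#
#     config = XLNetConfig(d_model=768, n_head=12)  # d_model must divide evenly by n_head
#     assert config.d_head == 64
#
# Passing an inconsistent `d_head` kwarg trips the ValueError guarded in __init__.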
| 47 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 697 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n"
_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                } ), reference_urls=[], )

    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False, ):
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 48 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def is_chinese(word):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
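# Illustrative example (added for clarity; not part of the original script):
# given BERT tokens ["巴", "黎", "是"] and the LTP-segmented word set {"巴黎"},
# add_sub_symbol returns ["巴", "##黎", "是"]: "黎" is marked as a subword of
# "巴黎" so whole-word masking can treat both tokens as one unit.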
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """simple docstring"""
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 697 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ) -> None:
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
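# Illustrative usage sketch (added for clarity; not part of the original module):
#
#     tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#     encoded = tokenizer("a question", "a passage")
#     # encoded["token_type_ids"] marks the second segment with 1s, per
#     # create_token_type_ids_from_sequences above.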
| 697 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    task_type: Optional[str] = field(
        default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
    labels: Optional[str] = field(
        default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
    module = import_module("""tasks""")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
            F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''')
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels)} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, """eval_results.txt""")
        if trainer.is_world_process_zero():
            with open(output_eval_file, """w""") as writer:
                logger.info("""***** Eval results *****""")
                for key, value in result.items():
                    logger.info("""  %s = %s""", key, value)
                    writer.write("""%s = %s\n""" % (key, value))
            results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, """test_results.txt""")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, """w""") as writer:
                for key, value in metrics.items():
                    logger.info("""  %s = %s""", key, value)
                    writer.write("""%s = %s\n""" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, """test_predictions.txt""")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, """w""") as writer:
                with open(os.path.join(data_args.data_dir, """test.txt"""), """r""") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 50 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "lilt"

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.0_2, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs, ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
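# Illustrative usage sketch (added for clarity; not part of the original module):
#
#     config = LiltConfig(max_2d_position_embeddings=2048)
#
# channel_shrink_ratio and max_2d_position_embeddings are the LiLT-specific
# knobs layered on top of the standard BERT-style hyperparameters.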
| 697 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, '''vision''')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 51 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
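    # Illustrative demo (added for clarity; not part of the original module):
    demo_maze = [
        [0, 1, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 0, 1, 0, 0],
        [1, 0, 0, 1, 0],
    ]
    solve_maze(demo_maze)  # prints the 0/1 solution grid and returns True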
| 697 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''levit'''

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.0_2, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation(self) -> float:
return 1e-4 | 52 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'from_type\' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'to_type\' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
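    # Illustrative conversions (added for clarity; not part of the original module):
    print(length_conversion(1, 'meter', 'kilometer'))  # 0.001
    print(length_conversion(4, 'kilometer', 'meter'))  # 4000.0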
| 697 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
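# Illustrative usage sketch (added for clarity; not part of the original module):
#
#     tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     ids = tokenizer.build_inputs_with_special_tokens([10, 11])
#
# XLNet appends <sep> and <cls> at the END of a sequence, unlike BERT-style
# tokenizers that prepend [CLS]; padding_side is "left" for the same reason.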
| 53 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
        sequence = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 697 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    '''simple docstring'''
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    '''simple docstring'''
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """Project Euler problem 54: count the hands player one wins in poker_hands.txt."""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
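# To run this suite (a sketch; assumes pytest plus the PokerHand class,
# SORTED_HANDS and poker_hands.txt that accompany it above this excerpt):
#   pytest -q test_poker_hand.py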
| 54 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
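# prepare_config_and_inputs_for_common() above is the hook the shared test mixins
# call: it hands every test below a (config, {"pixel_values": ...}) pair.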
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
    def test_feed_forward_chunking(self):
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
    def test_hidden_states_output(self):
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
    def test_initialization(self):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def test_inputs_embeds(self):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def test_model_common_attributes(self):
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
    def test_from_pretrained_no_checkpoint(self):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_save_load(self):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def test_tie_model_weights(self):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def test_tied_model_weights_key_ignore(self):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_load_save_without_tied_weights(self):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_model_weights_reload_when_same_tied_weights(self):
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
    def test_channels(self):
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
    def test_torchscript_output_attentions(self):
pass
@unittest.skip('Safetensors is not supported by timm.' )
    def test_can_use_safetensors(self):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
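# Minimal usage sketch outside the test suite (assumes network access to the
# timm "resnet18" weights; illustrative, not part of the original tests):
#   backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
#   feature_maps = backbone(pixel_values).feature_maps  # one map per out_index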
| 697 | 0 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Collect (huggingface_key, original_key) rename pairs for the stage embedding."""
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    """Collect rename pairs for attention block ``cnt`` of stage ``idx``."""
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    """Collect the rename pair for the classification token of stage ``idx``."""
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def final():
    """Collect rename pairs for the final layer norm and classifier head."""
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
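# Each helper above returns (huggingface_key, original_checkpoint_key) pairs;
# the conversion below walks these pairs to rebuild the checkpoint's state dict
# under the HuggingFace naming scheme.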
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into the HuggingFace CvT format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
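# Example invocation (a sketch; the script name and paths are hypothetical, and
# the checkpoint must first be downloaded from the CvT model zoo linked above):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-hf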
| 55 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image = Image.open(ds[0]['file']).convert('RGB')
        encoding = image_processing(image, return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
        expected_words = a_  # bind the word list above to a readable name
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        expected_boxes = a_  # bind the box list above to a readable name
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
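# Usage sketch (illustrative; the OCR path additionally requires pytesseract):
#   processor = LayoutLMvaImageProcessor()                 # apply_ocr=True by default
#   out = processor(image, return_tensors="pt")            # out.words / out.boxes populated
#   processor = LayoutLMvaImageProcessor(apply_ocr=False)  # pixel values only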
| 697 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1_024,
"moussaKam/barthez": 1_024,
"moussaKam/barthez-orangesum-title": 1_024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) - 1  # <mask> occupies the last SentencePiece id
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None  # the SentencePiece processor itself is not picklable
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
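# Usage sketch (illustrative; assumes a local SentencePiece model file):
#   tokenizer = BarthezTokenizer(vocab_file="sentencepiece.bpe.model")
#   tokenizer("Bonjour le monde")["input_ids"]  # ids wrapped as <s> ... </s>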
| 56 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
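# Worked examples: solution(1) == 2, solution(6) == 13, and the default
# solution() == 104743, the 10001st prime (the published Euler 7 answer).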
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_upernet'] = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
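    # Registering a _LazyModule in sys.modules defers the torch-backed imports
    # until a symbol is first accessed, so importing the package stays cheap.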
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 57 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10
    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 up to block_size when it is shorter."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_fit_exactly(self):
        """Leave the sequence unchanged when it already has block_size elements."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence down to block_size when it is longer."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_process_story_no_highlights(self):
        """A story without @highlight markers yields an empty summary."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])
    def test_process_empty_story(self):
        """An empty story yields empty story and summary line lists."""
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])
    def test_process_story_with_missing_period(self):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())
    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())
    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())
    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
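# For reference, truncate_or_pad pads with the given token id up to block_size and
# truncates beyond it, e.g. truncate_or_pad([1, 2, 3], 5, 0) -> [1, 2, 3, 0, 0].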
| 697 | 0 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
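# Example invocation (hypothetical file names):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin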
| 58 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
TEXT = __lowerCAmelCase  # readable alias for the sample passage above
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)
    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 697 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"
    def __init__(self, vocab_size=50_358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4_096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
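# Sketch: the defaults above select block-sparse attention (block_size=64, three
# random blocks per query block); attention_type="original_full" restores the
# standard quadratic attention instead.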
| 59 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 0 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Collect (huggingface_key, original_key) rename pairs for the stage embedding."""
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    """Collect rename pairs for attention block ``cnt`` of stage ``idx``."""
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token( idx ):
    """simple docstring"""
    token = []
    token.append((f'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
    return token
def final():
    """simple docstring"""
    head = []
    head.append(('''layernorm.weight''', '''norm.weight''') )
    head.append(('''layernorm.bias''', '''norm.bias''') )
    head.append(('''classifier.weight''', '''head.weight''') )
    head.append(('''classifier.bias''', '''head.bias''') )
    return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    """simple docstring"""
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu''' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
    parser.add_argument(
        '''--cvt_file_name''',
        default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Path to the original CvT checkpoint file.''',
    )
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
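    # Illustrative sketch (not part of the original script): the conversion above reduces to
    # applying a list of (hf_key, original_key) pairs to the loaded checkpoint. The toy names
    # below are hypothetical and exist only to demonstrate the pattern.
    from collections import OrderedDict as _OrderedDict
    import torch as _torch
    _toy_original = {"stage0.blocks.0.attn.proj.weight": _torch.zeros(2, 2)}
    _toy_mapping = [
        ("cvt.encoder.stages.0.layers.0.attention.output.dense.weight", "stage0.blocks.0.attn.proj.weight"),
    ]
    _converted = _OrderedDict((hf_key, _toy_original[orig_key]) for hf_key, orig_key in _toy_mapping)
    assert list(_converted) == [_toy_mapping[0][0]]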
| 60 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "vit"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig ( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
return 1e-4
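# A minimal sketch (an assumption, not part of the original file) of how the `inputs`
# mapping above corresponds to torch.onnx.export-style dynamic axes: every axis of
# `pixel_values` stays symbolic, so one exported graph accepts any batch or image size.
_vit_dynamic_axes = {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
# e.g. torch.onnx.export(model, dummy_input, "vit.onnx",
#                        input_names=["pixel_values"], dynamic_axes=_vit_dynamic_axes)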
| 697 | 0 |
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution( n = N ):
    """simple docstring"""
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
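    # A tiny illustrative check (not in the original solution) of the adjacent-product idea,
    # using a window of 2 instead of 13: in "73167" the best adjacent pair is 6 * 7 = 42.
    assert max(int(a) * int(b) for a, b in zip("73167", "73167"[1:])) == 42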
| 61 |
'''simple docstring'''
def solution( length = 5_0 ) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
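    # Sanity checks (illustrative, not in the original file): a row of length 3 admits
    # exactly 2 fillings, and length 7 admits 17, matching Project Euler problem 114.
    assert solution(3) == 2 and solution(7) == 17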
| 697 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    """simple docstring"""
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual( self , lista , listb , tol ):
        self.assertEqual(len(lista ) , len(listb ) )
        for a, b in zip(lista , listb ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    m = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 1_0
    def assertListAlmostEqual( self , lista , listb , tol , msg=None ):
        self.assertEqual(len(lista ) , len(listb ) )
        for a, b in zip(lista , listb ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs , expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_a = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_a , expected_learning_rates , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_b = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_a , lrs_b , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    '''simple docstring'''
    def __init__( self , fn ):
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
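# A minimal demonstration (not part of the original tests) of why the wrapper above exists:
# plain lambdas cannot be pickled by torch.save, but a module-level wrapper class around a
# named callable can round-trip through pickle.
import pickle as _pickle
_round_trip = _pickle.loads(_pickle.dumps(LambdaScheduleWrapper(abs)))
assert _round_trip(-3) == 3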
| 62 |
'''simple docstring'''
def nor_gate( input_a , input_b ) -> int:
    """simple docstring"""
    return int(input_a == input_b == 0 )
def main() -> None:
    """simple docstring"""
    print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
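    # Quick property check (illustrative, not in the original file): NOR is the negation
    # of OR, i.e. nor_gate(a, b) == int(not (a or b)) for all binary inputs.
    assert all(nor_gate(a, b) == int(not (a or b)) for a in (0, 1) for b in (0, 1))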
| 697 | 0 |
def ugly_numbers( n : int ):
    ugly_nums = [1]
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , n ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(200) = }""")
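    # Illustrative check (not in the original file): the first ugly numbers are
    # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 7th is 8 and the 10th is 12.
    assert ugly_numbers(7) == 8 and ugly_numbers(10) == 12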
| 63 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory( args ):
    """simple docstring"""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand ( BaseTransformersCLICommand ):
    """simple docstring"""
    @staticmethod
    def register_subcommand( parser ):
        download_parser = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be downloaded even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model , cache , force , trust_remote_code ):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ):
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
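# Illustrative CLI usage (an assumption about how this command is wired into the
# `transformers-cli` entry point, which is not shown in this file):
#   transformers-cli download bert-base-uncased --cache-dir ./models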
| 697 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 2_55.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline ( DiffusionPipeline ):
    def __init__( self , vqvae , unet , scheduler , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , image = None , batch_size = 1 , num_inference_steps = 100 , eta = 0.0 , generator = None , output_type = "pil" , return_dict = True , ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 64 |
'''simple docstring'''
def excel_title_to_column( column_title : str ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 6_4) * pow(2_6 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
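    # A complementary sketch (not in the original file): the inverse mapping from a 1-based
    # column number back to its Excel title, handy for round-trip testing.
    def column_to_excel_title(column: int) -> str:
        title = ''
        while column > 0:
            column, rem = divmod(column - 1, 26)
            title = chr(65 + rem) + title
        return title
    assert column_to_excel_title(excel_title_to_column('AZ')) == 'AZ'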
| 697 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
    'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
    },
    'spm_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_config_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self , vocab_file , spm_file , src_lang=None , tgt_lang=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , language_codes="m2m100" , sp_model_kwargs: Optional[Dict[str, Any]] = None , num_madeup_words=8 , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""" , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else """en"""
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowercase ( self : List[str] ,A : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowercase ( self : Optional[Any] ,A : str ):
'''simple docstring'''
return self.sp_model.encode(A ,out_type=A )
def __lowercase ( self : List[str] ,A : Dict ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A ,self.encoder[self.unk_token] )
def __lowercase ( self : Any ,A : int ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A ,self.unk_token )
def __lowercase ( self : Tuple ,A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
UpperCAmelCase__ : str = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
    def get_special_tokens_mask( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.__dict__.copy()
UpperCAmelCase__ : Dict = None
return state
def __setstate__( self : int ,A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = load_spm(self.spm_file ,self.sp_model_kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory" )
        vocab_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        spm_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seqaseq_batch( self , src_texts: List[str] , src_lang: str = "en" , tgt_texts: Optional[List[str]] = None , tgt_lang: str = "ro" , **kwargs , ):
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self , raw_inputs , src_lang: Optional[str] , tgt_lang: Optional[str] , **extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang: str ):
        '''simple docstring'''
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang: str ):
        '''simple docstring'''
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang: str ):
        '''simple docstring'''
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang: str ):
        '''simple docstring'''
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm( path , sp_model_kwargs ):
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path ):
    '''simple docstring'''
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json( data , path ):
    '''simple docstring'''
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
| 65 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor ( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ["audio_values", "audio_mask"]
    def __init__( self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=4_4100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , ).T
    def _np_extract_fbank_features( self , waveform ) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 2_0.0
        log_spec = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
| 697 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests ( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            'block_out_channels': [3_2, 6_4],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 66 |
'''simple docstring'''
def jaccard_similarity( set_a , set_b , alternative_union=False ):
    """simple docstring"""
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
__lowerCAmelCase ={"a", "b", "c", "d", "e"}
__lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 697 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
def fizz_buzz( number , iterations ) -> str:
    """simple docstring"""
    if not isinstance(iterations , int ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    out = ''
    while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest ( nn.Module ):
"""simple docstring"""
    def __init__( self ):
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linearb(self.batchnorm(self.lineara(x ) ) )
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ) -> List[Any]:
__UpperCAmelCase =[]
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
def _a ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase =[]
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
        bs , arga = mock_training_loop_function("""hello""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def _a ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Any ):
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _a ( self : str ) -> Dict:
@find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _a ( self : Optional[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga , argb ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def _a ( self : Any ) -> Tuple:
@find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def _a ( self : List[Any] ) -> Optional[int]:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
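# A minimal live sketch (not part of the original test suite) of the decorator's contract:
# it retries the wrapped function with halved batch sizes after an OOM-style RuntimeError,
# exactly the behaviour exercised by the tests above.
@find_executable_batch_size(starting_batch_size=4)
def _demo_loop(batch_size):
    if batch_size > 1:
        raise RuntimeError("CUDA out of memory.")  # treated as an OOM by accelerate
    return batch_size

assert _demo_loop() == 1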
| 68 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream ( AbstractDatasetInputStream ):
    """simple docstring"""
    def __init__( self , generator , features = None , cache_dir = None , keep_in_memory = False , streaming = False , gen_kwargs = None , num_proc = None , **kwargs , ):
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )
    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
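# Typical entry point (illustrative; an assumption about the public `datasets` API rather
# than something shown in this file): `Dataset.from_generator` wraps this reader, e.g.
#   from datasets import Dataset
#   def gen():
#       yield {"text": "hello"}
#   ds = Dataset.from_generator(gen)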
| 697 | 0 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        """simple docstring"""
        if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows." )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result) )
        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {f'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k ):
    def estimator(n: int , c: int , k: int ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
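# Worked example (illustrative, not part of the metric): with n = 2 samples and c = 1
# correct, pass@1 = 1 - (1 - 1/2) = 0.5 and pass@2 = 1.0, matching the docstring above.
assert float(estimate_pass_at_k([2] , [1] , 1 )[0] ) == 0.5
assert float(estimate_pass_at_k([2] , [1] , 2 )[0] ) == 1.0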
| 69 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate( initial_vectors , steps ) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step( vectors ) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate( vector , angle_in_degrees ) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot( vectors ) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
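    # Structural check (illustrative, not in the original file): each iteration replaces every
    # edge with 4 edges, so 5 iterations on the closed 3-edge triangle yield 3 * 4**5 + 1 = 3073 points.
    assert len(processed_vectors) == 3 * 4**5 + 1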
| 697 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"abeja/gpt-neox-japanese-2.7b": 2_048,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    '''simple docstring'''
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token='<|endoftext|>',
        pad_token='<|endoftext|>',
        bos_token='<|startoftext|>',
        eos_token='<|endoftext|>',
        do_clean_text=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji )
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.raw_vocab)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = ''.join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    '''simple docstring'''

    def __init__(self, vocab, ids_to_tokens, emoji):
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )

    def __len__(self):
        """simple docstring"""
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        """simple docstring"""
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content
    def tokenize(self, text, clean=False):
        """simple docstring"""
        text = text.replace(' ', '<SP>')
        text = text.replace('　', '<SP>')
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0] )[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checku2e(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline='\n'):
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word])
            elif word == "<SP>":
                words.append(' ')
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append('\t')
            elif word == "<BLOCK>":
                words.append('▀')
            elif word == "<KIGOU>":
                words.append('ǀ')
            elif word == "<U2000U2BFF>":
                words.append('‖')
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
        text = ''.join(words)
        return text
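

def _demo_byte_fallback():
    # Added illustration (not part of the original file): characters missing
    # from the vocabulary are emitted as '<|byte%d|>' tokens by tokenize()
    # above and reassembled from a bytearray when decoding.
    sample = 'é'
    byte_tokens = ['<|byte%d|>' % i for i in sample.encode('utf-8')]
    assert byte_tokens == ['<|byte195|>', '<|byte169|>']
    assert bytearray(int(t[6:-2]) for t in byte_tokens).decode('utf-8', errors='replace') == sample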
| 70 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """simple docstring"""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word):
    """simple docstring"""
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens):
    """simple docstring"""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens, chinese_word_set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    """simple docstring"""
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
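

def _demo_add_sub_symbol():
    # Added illustration (not part of the original script): with the whole
    # word '中国' known from LTP segmentation, the non-initial BERT sub-token
    # is prefixed with '##' so whole-word masking can treat the word as one
    # unit.
    assert add_sub_symbol(['中', '国', '人'], {'中国'}) == ['中', '##国', '人']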
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 697 | 0 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """simple docstring"""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """simple docstring"""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """simple docstring"""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """simple docstring"""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f'''Open Knight Tour cannot be performed on a board of size {n}'''
    raise ValueError(msg)
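

def _demo_open_knight_tour():
    # Added illustration (not part of the original module): the returned board
    # stores the visit order 1..n*n; the 1x1 board has the trivial open tour.
    assert open_knight_tour(1) == [[1]]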
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type') )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
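

def _demo_token_type_ids():
    # Added illustration (not part of the original file): for a sequence pair,
    # create_token_type_ids_from_sequences marks [CLS] A [SEP] with 0s and
    # B [SEP] with 1s, following the BERT convention (101/102 below are
    # hypothetical cls/sep ids used only for this demo).
    token_ids_0 = [11, 12]
    token_ids_1 = [21, 22, 23]
    zeros = len([101] + token_ids_0 + [102]) * [0]  # [CLS] + A + [SEP]
    ones = len(token_ids_1 + [102]) * [1]  # B + [SEP]
    assert zeros + ones == [0, 0, 0, 0, 1, 1, 1, 1]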
| 697 | 0 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 72 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
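

def _demo_lilt_config():
    # Added usage sketch (not part of the original file): instantiating the
    # config with the defaults defined above and reading the LiLT-specific
    # fields.
    config = LiltConfig()
    assert config.channel_shrink_ratio == 4
    assert config.max_2d_position_embeddings == 1024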
| 697 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 73 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
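

def _demo_solve_maze():
    # Added illustration (not part of the original module): 0 marks a free
    # cell and 1 a blocked one; solve_maze prints the path cells as 1s in the
    # solution grid and returns whether a path from (0, 0) to the bottom-right
    # corner exists.
    maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    assert solve_maze(maze) is True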
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = ShapEPipeline
lowerCAmelCase_ = ['''prompt''']
lowerCAmelCase_ = ['''prompt''']
lowerCAmelCase_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase_ = False
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
return 8
@property
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__SCREAMING_SNAKE_CASE : List[Any] = PriorTransformer(**_A )
return model
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = ShapERenderer(**_A )
return model
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.dummy_prior
__SCREAMING_SNAKE_CASE : int = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_tokenizer
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_renderer
__SCREAMING_SNAKE_CASE : Tuple = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
__SCREAMING_SNAKE_CASE : Dict = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCAmelCase__ ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ):
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = '''cpu'''
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**_A )
__SCREAMING_SNAKE_CASE : str = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE : Dict = pipe(**self.get_dummy_inputs(_A ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
__SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__SCREAMING_SNAKE_CASE : str = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : List[str] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_A )
__SCREAMING_SNAKE_CASE : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE : Tuple = 1
__SCREAMING_SNAKE_CASE : Any = 2
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
__SCREAMING_SNAKE_CASE : str = batch_size * [inputs[key]]
__SCREAMING_SNAKE_CASE : str = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
__SCREAMING_SNAKE_CASE : str = ShapEPipeline.from_pretrained('''openai/shap-e''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE : Any = torch.Generator(device=_A ).manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 74 |
'''simple docstring'''
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'from_type\' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'to_type\' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(1_0, exponent)
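

def _demo_length_conversion():
    # Added illustration (not part of the original module): unit names are
    # lowercased, a trailing 's' is stripped, and full names are mapped to
    # their symbols before the exponent difference is applied.
    assert length_conversion(4, 'meter', 'kilometer') == 0.004
    assert length_conversion(1, 'kilometer', 'meter') == 1000
    assert length_conversion(3, 'megametres', 'km') == 3000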
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs):
        '''simple docstring'''
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs):
        '''simple docstring'''
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs):
        '''simple docstring'''
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]], do_resize: Optional[bool] = None, size_divisor: Optional[int] = None, resample=None, do_rescale: Optional[bool] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError('''Invalid image(s)''' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
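

def _demo_size_divisor_rounding():
    # Added illustration (not part of the original file): heights and widths
    # are rounded *down* to the closest multiple of size_divisor, so a
    # 480x522 image becomes 480x512 with the default size_divisor of 32.
    size_divisor = 32
    height, width = 480, 522
    assert (height // size_divisor * size_divisor, width // size_divisor * size_divisor) == (480, 512)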
| 75 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a_ = self.processor.tokenajson(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , ) -> List[Any]:
__lowercase : int = parent
__lowercase : Optional[Any] = batch_size
__lowercase : Union[str, Any] = seq_length
__lowercase : Optional[int] = is_training
__lowercase : Union[str, Any] = use_input_mask
__lowercase : Any = use_token_type_ids
__lowercase : Any = use_labels
__lowercase : Optional[Any] = vocab_size
__lowercase : Optional[Any] = hidden_size
__lowercase : str = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : Dict = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : List[str] = hidden_dropout_prob
__lowercase : List[str] = attention_probs_dropout_prob
__lowercase : Optional[int] = max_position_embeddings
__lowercase : Tuple = type_vocab_size
__lowercase : Tuple = type_sequence_label_size
__lowercase : str = initializer_range
__lowercase : List[str] = num_labels
__lowercase : Dict = num_choices
__lowercase : Dict = scope
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : List[Any] = None
if self.use_input_mask:
__lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : List[Any] = None
__lowercase : int = None
__lowercase : Dict = None
if self.use_labels:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ) -> Tuple:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
__lowercase : Dict = NystromformerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowercase : Dict = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowercase : List[str] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
__lowercase : Tuple = NystromformerForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
__lowercase : Union[str, Any] = NystromformerForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : Any = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
__lowercase : List[str] = self.num_labels
__lowercase : Any = NystromformerForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
__lowercase : Optional[int] = self.num_labels
__lowercase : Optional[int] = NystromformerForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
__lowercase : Tuple = self.num_choices
__lowercase : Dict = NystromformerForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowercase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Optional[int] = self.prepare_config_and_inputs()
(
(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,
) : Union[str, Any] = config_and_inputs
__lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase =False
UpperCamelCase =False
def _lowerCamelCase ( self ) -> int:
__lowercase : Dict = NystromformerModelTester(self )
__lowercase : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def _lowerCamelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Any:
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Any:
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase : Dict = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> str:
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
@slow
def _lowerCamelCase ( self ) -> Union[str, Any]:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Dict = NystromformerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Dict = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
__lowercase : str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
__lowercase : Union[str, Any] = model(UpperCamelCase_ )[0]
__lowercase : List[Any] = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , UpperCamelCase_ )
__lowercase : int = torch.tensor(
[[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Tuple = '''the [MASK] of Belgium is Brussels'''
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
__lowercase : str = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
__lowercase : int = tokenizer(UpperCamelCase_ , return_tensors='''pt''' )
with torch.no_grad():
__lowercase : str = model(encoding.input_ids ).logits
__lowercase : int = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , '''capital''' )
| 76 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
a_ = parent
a_ = out_indices if out_indices is not None else [4]
a_ = stage_names
a_ = out_features
a_ = backbone
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = use_pretrained_backbone
a_ = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
| 697 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A = logging.get_logger(__name__)
A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
A = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
A = {
"""gpt-neox-20b""": 2_048,
}
class a__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : int="<|endoftext|>" , UpperCamelCase_ : Union[str, Any]="<|endoftext|>" , UpperCamelCase_ : Tuple="<|endoftext|>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Optional[int] , ):
"""simple docstring"""
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_) != add_prefix_space:
__UpperCAmelCase : Optional[Any] = getattr(UpperCamelCase_ , pre_tok_state.pop("type"))
__UpperCAmelCase : int = add_prefix_space
__UpperCAmelCase : str = pre_tok_class(**UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = add_prefix_space
def a_ ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_)
return tuple(UpperCamelCase_)
def a_ ( self : Optional[Any] , UpperCamelCase_ : "Conversation"):
"""simple docstring"""
__UpperCAmelCase : Tuple = []
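        # Flatten the conversation into a single id sequence, appending the EOS token after every message.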
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_) + [self.eos_token_id])
if len(UpperCamelCase_) > self.model_max_length:
__UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
| 77 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
        # with apply_ocr = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
        # with apply_ocr = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 | 0 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( BaseImageProcessor ):
a__ : Tuple = ["""pixel_values"""]
def __init__(self : int , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : int = 8 , **__a : int , ):
super().__init__(**__a )
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = pad_size
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : int , __a : Optional[Union[str, ChannelDimension]] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(__a )
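        # Mirror-pad the bottom and right edges up to the next multiple of `size`.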
UpperCAmelCase_ = (old_height // size + 1) * size - old_height
UpperCAmelCase_ = (old_width // size + 1) * size - old_width
return pad(__a , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__a )
def _lowercase (self : Tuple , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ):
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_pad if do_pad is not None else self.do_pad
UpperCAmelCase_ = pad_size if pad_size is not None else self.pad_size
UpperCAmelCase_ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_pad:
UpperCAmelCase_ = [self.pad(__a , size=__a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
| 78 |
'''simple docstring'''
import math
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( _UpperCAmelCase = 1_0_0_0_1 ) -> int:
"""simple docstring"""
try:
a_ = int(_UpperCAmelCase )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
a_ = []
a_ = 2
    while len(_UpperCAmelCase ) < nth:
        if is_prime(_UpperCAmelCase ):
            primes.append(_UpperCAmelCase )
        num += 1
    return primes[-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
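    # With the default nth of 10001 this prints: solution() = 104743 (the known answer to Project Euler problem 7).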
| 697 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase=1000 ) -> List[Any]:
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
UpperCAmelCase__ : Union[str, Any] = n - 1
UpperCAmelCase__ : Union[str, Any] = 0
while d % 2 == 0:
        d //= 2
exp += 1
    # n - 1 = d * (2 ** exp)
UpperCAmelCase__ : Dict = 0
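    # Run `prec` rounds of the Miller-Rabin witness test, each with a random base in [2, n - 1].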
while count < prec:
UpperCAmelCase__ : List[Any] = random.randint(2 , n - 1 )
UpperCAmelCase__ : Optional[Any] = bin_exp_mod(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if b != 1:
UpperCAmelCase__ : List[Any] = True
for _ in range(__lowerCamelCase ):
if b == n - 1:
UpperCAmelCase__ : List[str] = False
break
UpperCAmelCase__ : List[str] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 79 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = [1, 2, 3, 4]
a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = ''
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
a_ , a_ = process_story(UpperCAmelCase__ )
a_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = ['It was the best of times.']
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = torch.tensor([1, 2, 3, 4] )
a_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = 101
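        # Token type ids alternate, flipping at every occurrence of the separator id (101).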
a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = DPTConfig()
if "large" in checkpoint_url:
__lowercase = 1_024
__lowercase = 4_096
__lowercase = 24
__lowercase = 16
__lowercase = [5, 11, 17, 23]
__lowercase = [256, 512, 1_024, 1_024]
__lowercase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowercase = True
__lowercase = 150
__lowercase = """huggingface/label-files"""
__lowercase = """ade20k-id2label.json"""
__lowercase = json.load(open(cached_download(hf_hub_url(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = [1, 150, 480, 480]
return config, expected_shape
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
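    # Translate an original DPT state-dict key into the Hugging Face naming scheme.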
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowercase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowercase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowercase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowercase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowercase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowercase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowercase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowercase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowercase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowercase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowercase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowercase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowercase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowercase = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__lowercase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowercase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowercase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowercase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowercase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowercase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowercase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowercase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowercase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowercase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowercase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowercase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowercase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowercase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowercase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowercase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowercase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowercase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowercase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowercase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowercase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowercase = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
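        # query projection: the first third of the fused qkv matrix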
__lowercase = in_proj_weight[: config.hidden_size, :]
__lowercase = in_proj_bias[: config.hidden_size]
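        # key projection: the middle third of the fused qkv matrix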
__lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
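        # value projection: the final third of the fused qkv matrix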
__lowercase = in_proj_weight[
-config.hidden_size :, :
]
__lowercase = in_proj_bias[-config.hidden_size :]
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase , __lowercase = get_dpt_config(lowerCamelCase )
# load original state_dict from URL
__lowercase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
__lowercase = state_dict.pop(lowerCamelCase )
__lowercase = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase , lowerCamelCase )
# load HuggingFace model
__lowercase = DPTForSemanticSegmentation(lowerCamelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# Check outputs on an image
__lowercase = 480 if """ade""" in checkpoint_url else 384
__lowercase = DPTImageProcessor(size=lowerCamelCase )
__lowercase = prepare_img()
__lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" )
# forward pass
__lowercase = model(**lowerCamelCase ).logits if """ade""" in checkpoint_url else model(**lowerCamelCase ).predicted_depth
# Assert logits
__lowercase = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
__lowercase = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(lowerCamelCase )
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase )
)
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 80 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , ToolTesterMixin ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
| 697 | 0 |
def lowerCAmelCase_ ( __lowerCamelCase = 1_0_0_0_0_0_0 ):
__snake_case : Any = 1
__snake_case : Any = 1
__snake_case : Union[str, Any] = {1: 1}
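    # counters memoizes Collatz chain lengths: counters[n] = number of terms in the chain starting at n.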
for inputa in range(2 , __lowerCamelCase ):
__snake_case : str = 0
__snake_case : List[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__snake_case : List[str] = (3 * number) + 1
counter += 1
if inputa not in counters:
__snake_case : str = counter
if counter > pre_counter:
__snake_case : List[Any] = inputa
__snake_case : Tuple = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
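    # solution(1_000_000) returns 837799, the known answer to Project Euler problem 14.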
| 81 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
def constraint_to_multiple_of(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=0 , lowerCAmelCase__=None ):
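        # Snap `val` to the nearest multiple of `multiple`, falling back to floor/ceil to respect max_val/min_val.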
UpperCAmelCase_ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
UpperCAmelCase_ = math.floor(val / multiple ) * multiple
if x < min_val:
UpperCAmelCase_ = math.ceil(val / multiple ) * multiple
return x
UpperCAmelCase_ = (output_size, output_size) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else output_size
UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = output_size
# determine new height and width
UpperCAmelCase_ = output_height / input_height
UpperCAmelCase_ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
UpperCAmelCase_ = scale_width
else:
# fit height
UpperCAmelCase_ = scale_height
UpperCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=lowerCAmelCase__ )
UpperCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=lowerCAmelCase__ )
return (new_height, new_width)
class lowercase__ ( BaseImageProcessor ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : Optional[int] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 1 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : int , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = size if size is not None else {"height": 384, "width": 384}
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = keep_aspect_ratio
UpperCAmelCase_ = ensure_multiple_of
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : List[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 1 , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
UpperCAmelCase_ = get_resize_output_image_size(
_UpperCAmelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=_UpperCAmelCase , multiple=_UpperCAmelCase , )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : List[str] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : int = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : int = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Any , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_UpperCAmelCase )
UpperCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Tuple] = None ) -> int:
'''simple docstring'''
UpperCAmelCase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(_UpperCAmelCase ):
UpperCAmelCase_ = target_sizes.numpy()
UpperCAmelCase_ = []
for idx in range(len(_UpperCAmelCase ) ):
UpperCAmelCase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=_UpperCAmelCase )
UpperCAmelCase_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_UpperCAmelCase )
else:
UpperCAmelCase_ = logits.argmax(dim=1 )
UpperCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 82 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _snake_case ( PretrainedConfig ):
"""simple docstring"""
_UpperCamelCase = "vit"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
super().__init__(**UpperCAmelCase__ )
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = encoder_stride
class _snake_case ( OnnxConfig ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
| 697 | 0 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def snake_case_ ( A_ : Dict, A_ : bool = True, A_ : float = math.inf, A_ : float = -math.inf, A_ : float = math.inf, A_ : float = -math.inf, A_ : bool = False, A_ : float = 1_00, A_ : float = 0.01, A_ : float = 1, ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : str = search_prob
_lowerCamelCase : str = start_temperate
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : int = 0
_lowerCamelCase : Any = None
while not search_end:
_lowerCamelCase : Dict = current_state.score()
if best_state is None or current_score > best_state.score():
_lowerCamelCase : Tuple = current_state
scores.append(A_ )
iterations += 1
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Optional[int] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_lowerCamelCase : List[Any] = random.randint(0, len(A_ ) - 1 ) # picking a random neighbor
_lowerCamelCase : Dict = neighbors.pop(A_ )
_lowerCamelCase : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_lowerCamelCase : str = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_lowerCamelCase : Optional[Any] = picked_neighbor
else:
_lowerCamelCase : Optional[int] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_lowerCamelCase : Union[str, Any] = picked_neighbor
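        # Geometric cooling: shrink the temperature by a fixed fraction after each outer iteration.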
_lowerCamelCase : List[str] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_lowerCamelCase : Tuple = True
else:
_lowerCamelCase : Optional[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(A_ ), A_ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def snake_case_ ( A_ : int, A_ : Tuple ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def snake_case_ ( A_ : Optional[int], A_ : List[Any] ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F"""{local_min.score()}"""
)
| 83 |
'''simple docstring'''
def a ( _UpperCAmelCase = 5_0 ) -> int:
"""simple docstring"""
a_ = [1] * (length + 1)
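    # ways_number[n] counts the ways to fill a row of length n with blocks of length >= 3,
    # each pair of blocks separated by at least one empty square (Project Euler problem 114).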
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
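    # With the default length of 50 this prints: solution() = 16475640049 (the known answer to Project Euler problem 114).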
| 697 | 0 |
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
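    # Kahn's topological sort over the DAG, tracking the longest path (counted in vertices) ending at each node.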
lowercase = [0] * len(__SCREAMING_SNAKE_CASE )
lowercase = []
lowercase = [1] * len(__SCREAMING_SNAKE_CASE )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if indegree[i] == 0:
queue.append(__SCREAMING_SNAKE_CASE )
while queue:
lowercase = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowercase = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__SCREAMING_SNAKE_CASE )
print(max(__SCREAMING_SNAKE_CASE ) )
# Adjacency list of Graph
UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 84 |
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
"""simple docstring"""
return int(input_a == input_a == 0 )
def a ( ) -> None:
"""simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 | 0 |
import os
def _a ( lowercase__ : str = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) as input_file:
SCREAMING_SNAKE_CASE__ : List[str] = [
[int(lowercase__ ) for element in line.split(',' )]
for line in input_file.readlines()
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = len(matrix[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = [[-1 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
for i in range(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = matrix[i][0]
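    # Sweep the remaining columns left to right; allowed moves are right, down, and up (Project Euler problem 82).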
for j in range(1 , lowercase__ ):
for i in range(lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
SCREAMING_SNAKE_CASE__ : List[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class _snake_case ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str:
a_ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' )
download_parser.set_defaults(func=UpperCAmelCase__ )
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = model
a_ = cache
a_ = force
a_ = trust_remote_code
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 697 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : int=13 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : Dict=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Tuple=99 , UpperCAmelCase : Dict=32 , UpperCAmelCase : Dict=5 , UpperCAmelCase : str=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : List[str]=128 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : Any=16 , UpperCAmelCase : int=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : List[str]=None , ):
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
def __A ( self : List[str] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : int ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
def __A ( self : Any ):
        A_ , A_ , A_ , A_ , A_ , A_ , A_ = self.prepare_config_and_inputs()
A_ = True
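        # Extra encoder outputs and mask so the model can be exercised as a decoder with cross-attention.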
A_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': NezhaModel,
            'fill-mask': NezhaForMaskedLM,
            'question-answering': NezhaForQuestionAnswering,
            'text-classification': NezhaForSequenceClassification,
            'token-classification': NezhaForTokenClassification,
            'zero-shot': NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) | 86 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 0 |
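# A couple of sanity checks for excel_title_to_column as fixed above; the
# conversion is base-26 with A = 1, so "AB" = 1 * 26 + 2.
assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28
assert excel_title_to_column("ZY") == 26 * 26 + 25  # 701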
def binomial_coefficient(n: int, r: int) -> int:
    """simple docstring"""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
| 87 |
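# Worked example for the routine above: each outer pass applies Pascal's rule
# c[j] += c[j - 1] in place, so the print statement outputs C(10, 5) = 252.
assert binomial_coefficient(n=10, r=5) == 252
assert binomial_coefficient(n=5, r=2) == 10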
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""
    model_input_names = ["audio_values", "audio_mask"]
    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney').T
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0)
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 697 | 0 |
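# Minimal usage sketch for the feature extractor above; the class name follows
# the fix applied here and the one-second random waveform is illustrative.
import numpy as np
feature_extractor = TvltFeatureExtractor()
waveform = np.random.randn(44100).astype(np.float32)  # 1 s of mono audio at the default 44.1 kHz
features = feature_extractor(waveform, sampling_rate=44100, return_tensors="np")
print(features["audio_values"].shape, features["audio_mask"].shape)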
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_moses_pipeline_simple(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 88 |
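# Self-contained sketch of the lower-casing behavior exercised above: write a
# tiny vocab to a temporary file, then tokenize with lower_case=True. The
# vocab contents here are illustrative.
import os
import tempfile
from transformers.models.transfo_xl.tokenization_transfo_xl import TransfoXLTokenizer
with tempfile.TemporaryDirectory() as tmp:
    vocab_path = os.path.join(tmp, "vocab.txt")
    with open(vocab_path, "w", encoding="utf-8") as f:
        f.write("\n".join(["<unk>", "want", "unwanted", ",", "running"]) + "\n")
    tokenizer = TransfoXLTokenizer(vocab_file=vocab_path, lower_case=True)
    print(tokenizer.tokenize("<unk> UNwanted , running"))  # ['<unk>', 'unwanted', ',', 'running']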
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """simple docstring"""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
__lowerCAmelCase ={"a", "b", "c", "d", "e"}
__lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 697 | 0 |
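# Worked example for the sets above: the intersection {"c", "d", "e"} has 3
# elements and the union has 8, so the printed similarity is 3 / 8 = 0.375.
assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375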
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 89 |
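# Worked example: cos(60 degrees) is 0.5, so a polarizer at 60 degrees passes
# a quarter of a 100-unit beam -- malus_law(100, 60) is 25.0 up to rounding.
assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9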
'''simple docstring'''
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
'starting number must be\n and integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
a_ = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 0 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    '''simple docstring'''
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                F"""Expected a_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(a_coeffs)}"""
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                F"""Expected b_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(b_coeffs)}"""
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result | 90 |
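# Usage sketch for IIRFilter as fixed above: with a_coeffs = b_coeffs =
# [1, 0, 0] the difference equation reduces to y[n] = x[n], an identity
# filter, so samples pass through unchanged. Coefficients are illustrative.
identity = IIRFilter(2)
identity.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
assert [identity.process(x) for x in (1.0, -0.5, 0.25)] == [1.0, -0.5, 0.25]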
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """simple docstring"""
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs)
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| 697 | 0 |
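# Equivalent public-API usage: datasets.Dataset.from_generator builds on the
# reader above. The three-row generator is illustrative.
from datasets import Dataset
def gen():
    for i in range(3):
        yield {"value": i}
ds = Dataset.from_generator(gen)
print(ds[0])  # {'value': 0}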
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class Version:
    '''simple docstring'''
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
    def __repr__(self):
        return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
    @property
    def tuple(self):
        return self.major, self.minor, self.patch
    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(F'{other} (type {type(other)}) cannot be compared to version.')
    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple
    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))
    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})
    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.')
    return tuple(int(v) for v in [res.group('major'), res.group('minor'), res.group('patch')])
def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple) | 91 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 697 | 0 |
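# Each iteration replaces every segment with four shorter ones, so the vector
# count after n steps is 3 * 4**n + 1 for the initial triangle:
assert len(iterate(INITIAL_VECTORS, 1)) == 13
assert len(iterate(INITIAL_VECTORS, 2)) == 49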
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('.')[0].split(LORA_PREFIX_TEXT_ENCODER + '_')[-1].split('_')
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.')[0].split(LORA_PREFIX_UNET + '_')[-1].split('_')
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down', 'lora_up'))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace('lora_up', 'lora_down'))
        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list
        for item in pair_keys:
            visited.append(item)
    return pipeline
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
args = parser.parse_args()
base_model_path = args.base_model_path
checkpoint_path = args.checkpoint_path
dump_path = args.dump_path
lora_prefix_unet = args.lora_prefix_unet
lora_prefix_text_encoder = args.lora_prefix_text_encoder
alpha = args.alpha
pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 92 |
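# The per-layer update convert() applies, in miniature: LoRA adds
# alpha * (up @ down) to a frozen weight. Shapes and alpha are illustrative.
import torch
weight = torch.zeros(4, 4)
up, down = torch.randn(4, 2), torch.randn(2, 4)  # a rank-2 update
weight += 0.75 * torch.mm(up, down)  # same operation as curr_layer.weight.data above
print(weight.shape)  # torch.Size([4, 4])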
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """simple docstring"""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False
def is_chinese(word):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """simple docstring"""
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
args = parser.parse_args()
main(args)
| 697 | 0 |
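# add_sub_symbol in isolation, using the functions as fixed above: pieces that
# fall inside an LTP-segmented multi-character word get the "##" prefix, which
# is what the saved ref file records for whole-word masking.
print(add_sub_symbol(["我", "喜", "欢", "编", "程"], {"喜欢", "编程"}))
# -> ['我', '喜', '##欢', '编', '##程']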
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True)
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDebertaVaModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFDebertaVaModel,
            """fill-mask""": TFDebertaVaForMaskedLM,
            """question-answering""": TFDebertaVaForQuestionAnswering,
            """text-classification""": TFDebertaVaForSequenceClassification,
            """token-classification""": TFDebertaVaForTokenClassification,
            """zero-shot""": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaVaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        '''simple docstring'''
        pass
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 93 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> str:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 697 | 0 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = BertJapaneseTokenizer
UpperCamelCase_ = False
UpperCamelCase_ = True
def A__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
super().setUp()
lowercase : Optional[int] =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer) -> Tuple:
        '''simple docstring'''
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text
    def get_clean_sequence(self, tokenizer) -> int:
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self) -> List[Any]:
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self) -> List[str]:
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self) -> Optional[Any]:
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_full_tokenizer(self) -> Tuple:
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。')
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self) -> Any:
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='mecab')
        self.assertIsNotNone(tokenizer)
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer(self) -> List[Any]:
        '''simple docstring'''
        tokenizer = MecabTokenizer(mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def test_mecab_tokenizer_unidic_lite(self) -> Any:
        '''simple docstring'''
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic_lite')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def test_mecab_tokenizer_unidic(self) -> str:
        '''simple docstring'''
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def test_mecab_tokenizer_lower(self) -> Tuple:
        '''simple docstring'''
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def test_mecab_tokenizer_with_option(self) -> str:
        '''simple docstring'''
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option='-d /usr/local/lib/mecab/dic/jumandic')
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
    def test_mecab_tokenizer_no_normalize(self) -> List[Any]:
        '''simple docstring'''
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : int =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCAmelCase )
lowercase : Optional[int] ='''こんにちは、世界。\nこんばんは、世界。'''
lowercase : Any =tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , '''rb''' ) as handle:
lowercase : Tuple =pickle.load(UpperCAmelCase )
lowercase : Tuple =tokenizer_new.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@require_sudachi
def A__ ( self : Optional[int] ) -> int:
'''simple docstring'''
lowercase : str =SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def A__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase : int =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def A__ ( self : Optional[int] ) -> str:
'''simple docstring'''
lowercase : Optional[Any] =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : Optional[int] =SudachiTokenizer(do_lower_case=UpperCAmelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase : int =SudachiTokenizer(normalize_text=UpperCAmelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : int =SudachiTokenizer(trim_whitespace=UpperCAmelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def A__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : int =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(UpperCAmelCase )
lowercase : Dict ='''こんにちは、世界。\nこんばんは、世界。'''
lowercase : List[Any] =tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase : Any =os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , '''rb''' ) as handle:
lowercase : Union[str, Any] =pickle.load(UpperCAmelCase )
lowercase : str =tokenizer_new.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@require_jumanpp
def A__ ( self : Any ) -> Any:
'''simple docstring'''
lowercase : Any =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A__ ( self : int ) -> int:
'''simple docstring'''
lowercase : Any =JumanppTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] =JumanppTokenizer(normalize_text=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Any =JumanppTokenizer(trim_whitespace=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def A__ ( self : List[str] ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : int =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowercase : Optional[Any] ={}
for i, token in enumerate(UpperCAmelCase ):
lowercase : str =i
lowercase : int =WordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def A__ ( self : Any ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowercase : Tuple =tokenizer.subword_tokenizer
lowercase : Union[str, Any] =subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(UpperCAmelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowercase : Optional[Any] =subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(UpperCAmelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def A__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowercase : Tuple =tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase )
lowercase : Tuple =tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase )
lowercase : Any =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowercase : Tuple =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = BertJapaneseTokenizer
UpperCamelCase_ = False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowercase : Any =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def A__ ( self : Dict , **UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase )
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowercase : int ='''こんにちは、世界。 \nこんばんは、世界。'''
lowercase : Optional[int] ='''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def A__ ( self : Dict ) -> int:
'''simple docstring'''
pass # TODO add if relevant
def A__ ( self : str ) -> str:
'''simple docstring'''
pass # TODO add if relevant
def A__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
pass # TODO add if relevant
def A__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowercase : Dict =self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
lowercase : List[str] =tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
UpperCAmelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def A__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase : List[Any] ={}
for i, token in enumerate(UpperCAmelCase ):
lowercase : Tuple =i
lowercase : Dict =CharacterTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def A__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowercase : Tuple =tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase )
lowercase : List[str] =tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase )
lowercase : Any =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowercase : List[str] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[int] ='''cl-tohoku/bert-base-japanese'''
lowercase : Optional[Any] =AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str ='''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowercase : str ='''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
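# Usage sketch (added; not part of the original test file). A minimal example of
# the tokenizer behaviour the tests above assert; it assumes network access to the
# public `cl-tohoku/bert-base-japanese` checkpoint and a working MeCab install.
from transformers import BertJapaneseTokenizer

_tok = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
# MeCab splits into words first; WordPiece then refines each word into sub-tokens.
print(_tok.tokenize("こんばんは、世界。"))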
| 94 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "lilt"
def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]:
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = classifier_dropout
a_ = channel_shrink_ratio
a_ = max_ad_position_embeddings
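# Usage sketch (added). Builds the configuration defined above through the public
# `transformers.LiltConfig` name; the keyword names below follow the released API
# and are assumptions insofar as the obfuscated signature hides them.
from transformers import LiltConfig

_cfg = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
print(_cfg.model_type)  # "lilt"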
| 697 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCamelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ (__A ):
__magic_name__ = '''vision-encoder-decoder'''
__magic_name__ = True
def __init__( self : Union[str, Any] , **lowerCAmelCase_ : List[Any] ) -> Union[str, Any]:
super().__init__(**lowerCAmelCase_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"""A configuraton of type {self.model_type} cannot be instantiated because """
f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
UpperCAmelCase_ : str = kwargs.pop("encoder" )
UpperCAmelCase_ : Dict = encoder_config.pop("model_type" )
UpperCAmelCase_ : List[Any] = kwargs.pop("decoder" )
UpperCAmelCase_ : Dict = decoder_config.pop("model_type" )
UpperCAmelCase_ : Union[str, Any] = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = True
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Dict ) -> PretrainedConfig:
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : List[str] = self.encoder.to_dict()
UpperCAmelCase_ : List[Any] = self.decoder.to_dict()
UpperCAmelCase_ : Any = self.__class__.model_type
return output
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class UpperCamelCase_ (__A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase_ : List[Any] = OrderedDict()
UpperCAmelCase_ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
UpperCAmelCase_ : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : "PreTrainedTokenizerBase" , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
import torch
UpperCAmelCase_ : Optional[int] = OrderedDict()
UpperCAmelCase_ : Any = super().generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dummy_input["input_ids"].shape
UpperCAmelCase_ : List[str] = (batch, encoder_sequence, self._config.encoder_hidden_size)
UpperCAmelCase_ : List[str] = dummy_input.pop("input_ids" )
UpperCAmelCase_ : List[str] = dummy_input.pop("attention_mask" )
UpperCAmelCase_ : List[Any] = torch.zeros(lowerCAmelCase_ )
return common_inputs
class UpperCamelCase_ (__A ):
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
pass
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : PretrainedConfig ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : str = "default" ) -> OnnxConfig:
UpperCAmelCase_ : Optional[int] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(lowerCAmelCase_ , lowerCAmelCase_ )
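# Usage sketch (added). Exercises the `from_encoder_decoder_configs` classmethod
# defined above; `ViTConfig` and `BertConfig` are illustrative choices, not
# mandated by the original file.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

_cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
# The decoder is switched into cross-attention mode, as logged by the classmethod.
assert _cfg.decoder.is_decoder and _cfg.decoder.add_cross_attention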
| 95 |
'''simple docstring'''
from __future__ import annotations
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
a_ = len(_UpperCAmelCase )
    # Create a solutions matrix to record the path taken through the maze.
a_ = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )]
a_ = run_maze(_UpperCAmelCase , 0 , 0 , _UpperCAmelCase )
if solved:
print('\n'.join(str(_UpperCAmelCase ) for row in solutions ) )
else:
print('No solution exists!' )
return solved
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
"""simple docstring"""
a_ = len(_UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
a_ = 1
return True
a_ = (not i < 0) and (not j < 0) # Check lower bounds
a_ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
        # Check for already-visited cells and blocked cells.
a_ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
            # Mark this cell as part of the tentative path.
a_ = 1
            # Recursively explore the four neighbouring directions.
if (
run_maze(_UpperCAmelCase , i + 1 , _UpperCAmelCase , _UpperCAmelCase )
or run_maze(_UpperCAmelCase , _UpperCAmelCase , j + 1 , _UpperCAmelCase )
or run_maze(_UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase )
or run_maze(_UpperCAmelCase , _UpperCAmelCase , j - 1 , _UpperCAmelCase )
):
return True
            a_ = 0  # Backtrack: unmark this cell.
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
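# Worked example (added; helper names are hypothetical, not from the original
# file). A compact, runnable restatement of the backtracking solver above:
# 0 marks a free cell, 1 a wall; the search tries down/right/up/left and unwinds
# on dead ends, exactly as the recursive logic above does.
def _solve(maze):
    n = len(maze)
    path = [[0] * n for _ in range(n)]

    def walk(i, j):
        if i == j == n - 1:  # reached the bottom-right goal cell
            path[i][j] = 1
            return True
        if 0 <= i < n and 0 <= j < n and not maze[i][j] and not path[i][j]:
            path[i][j] = 1  # tentatively include this cell
            if walk(i + 1, j) or walk(i, j + 1) or walk(i - 1, j) or walk(i, j - 1):
                return True
            path[i][j] = 0  # backtrack
        return False

    return path if walk(0, 0) else None

print(_solve([[0, 1, 0], [0, 0, 0], [1, 1, 0]]))
# -> [[1, 0, 0], [1, 1, 1], [0, 0, 1]]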
| 697 | 0 |
"""simple docstring"""
from statistics import mean
import numpy as np
def a ( __UpperCAmelCase : list , __UpperCAmelCase : list , __UpperCAmelCase : list , __UpperCAmelCase : int ) -> list:
__magic_name__: Dict = 0
# Number of processes finished
__magic_name__: List[str] = 0
    # Completion status per process:
    # 0 while the process is still pending, 1 once it has finished.
__magic_name__: Union[str, Any] = [0] * no_of_process
    # List to hold the calculated turnaround times.
__magic_name__: str = [0] * no_of_process
# Sort by arrival time.
__magic_name__: Dict = [burst_time[i] for i in np.argsort(__UpperCAmelCase )]
__magic_name__: str = [process_name[i] for i in np.argsort(__UpperCAmelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
__magic_name__: Tuple = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
__magic_name__: Optional[Any] = arrival_time[i]
__magic_name__: List[Any] = 0
# Index showing the location of the process being performed
__magic_name__: List[Any] = 0
    # Tracks the highest response ratio seen so far in this pass.
__magic_name__: Any = 0
for i in range(0 , __UpperCAmelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
__magic_name__: Any = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
__magic_name__: Union[str, Any] = temp
__magic_name__: Union[str, Any] = i
        # Calculate the turnaround time.
__magic_name__: List[Any] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
        # Mark the process as finished.
__magic_name__: Union[str, Any] = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def a ( __UpperCAmelCase : list , __UpperCAmelCase : list , __UpperCAmelCase : list , __UpperCAmelCase : int ) -> list:
__magic_name__: List[str] = [0] * no_of_process
for i in range(0 , __UpperCAmelCase ):
__magic_name__: str = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
__lowerCamelCase = 5
__lowerCamelCase = ['A', 'B', 'C', 'D', 'E']
__lowerCamelCase = [1, 2, 3, 4, 5]
__lowerCamelCase = [1, 2, 3, 4, 5]
__lowerCamelCase = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__lowerCamelCase = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
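# Formula check (added). HRRN dispatches the ready process with the highest
# response ratio R = (waiting_time + burst_time) / burst_time — the quantity
# computed inside the selection loop above. A hand computation at a hypothetical
# decision time t = 3, with A (arrived 1, burst 1) and B (arrived 2, burst 2):
_r_a = ((3 - 1) + 1) / 1  # R_A = 3.0
_r_b = ((3 - 2) + 2) / 2  # R_B = 1.5
assert _r_a > _r_b  # the longer-waiting, shorter job A is scheduled first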
| 96 |
'''simple docstring'''
__lowerCAmelCase ={
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of each unit relative to the metre (the base unit).
__lowerCAmelCase ={
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> float:
"""simple docstring"""
a_ = from_type.lower().strip('s' )
a_ = to_type.lower().strip('s' )
a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )
a_ = UNIT_SYMBOL.get(_UpperCAmelCase , _UpperCAmelCase )
if from_sanitized not in METRIC_CONVERSION:
a_ = (
F'''Invalid \'from_type\' value: {from_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
)
raise ValueError(_UpperCAmelCase )
if to_sanitized not in METRIC_CONVERSION:
a_ = (
F'''Invalid \'to_type\' value: {to_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(_UpperCAmelCase )}'''
)
raise ValueError(_UpperCAmelCase )
a_ = METRIC_CONVERSION[from_sanitized]
a_ = METRIC_CONVERSION[to_sanitized]
a_ = 1
if from_exponent > to_exponent:
a_ = from_exponent - to_exponent
else:
a_ = -(to_exponent - from_exponent)
return value * pow(1_0 , _UpperCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
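# Worked values (added; the de-obfuscated parameter order `value, from_type,
# to_type` is an assumption recovered from the function body above):
#   meter -> kilometer : exponent 0 - 3 = -3, so 1 m  = 1 * 10**-3 = 0.001 km
#   gigametre -> Mm    : exponent 9 - 6 = +3, so 1 Gm = 1 * 10**3  = 1000.0 Mm
# Unit symbols and plural forms both work because inputs are stripped of a
# trailing "s" and mapped through UNIT_SYMBOL before the exponent lookup.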
| 697 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
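# Illustration (added; names here are hypothetical). `_LazyModule` above defers
# the heavy `modeling_focalnet` import until an attribute is first accessed; the
# same idea in miniature with only the standard library:
import importlib
import types

class _LazyDemo(types.ModuleType):
    def __init__(self, name, targets):
        super().__init__(name)
        self._targets = targets  # attribute name -> module path providing it

    def __getattr__(self, attr):
        value = getattr(importlib.import_module(self._targets[attr]), attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value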
| 97 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a_ = self.processor.tokenajson(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
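# Usage sketch (added). The obfuscated `tokenajson` above corresponds to the
# public `DonutProcessor.token2json`, which parses Donut's XML-like tags back
# into the nested dict asserted by the test; assumes the
# `naver-clova-ix/donut-base` checkpoint is downloadable.
from transformers import DonutProcessor

_proc = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
print(_proc.token2json("<s_name>John Doe</s_name><s_age>99</s_age>"))
# -> {'name': 'John Doe', 'age': '99'}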
| 697 | 0 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def a__ ( lowercase : float ) -> float:
"""simple docstring"""
if num <= 0:
raise ValueError('''math domain error''' )
return quad(lowercase, 0, lowercase, args=(lowercase) )[0]
def a__ ( lowercase : float, lowercase : float ) -> float:
"""simple docstring"""
return math.pow(lowercase, z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
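# Worked check (added; helper names are hypothetical). The integral above is the
# Gamma function, Gamma(z) = integral from 0 to inf of x**(z-1) * exp(-x) dx,
# so Gamma(n) = (n-1)! for positive integers:
import math
from scipy.integrate import quad

def _gamma(z):
    return quad(lambda x: x ** (z - 1) * math.exp(-x), 0, math.inf)[0]

assert abs(_gamma(5) - 24.0) < 1e-6  # Gamma(5) = 4! = 24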
| 98 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
a_ = parent
a_ = out_indices if out_indices is not None else [4]
a_ = stage_names
a_ = out_features
a_ = backbone
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = use_pretrained_backbone
a_ = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
| 697 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = ["""audio_values""", """audio_mask"""]
def __init__( self , __A=2048 , __A=1 , __A=[16, 16] , __A=128 , __A=44100 , __A=86 , __A=2048 , __A=0.0 , **__A , ):
super().__init__(
feature_size=__A , sampling_rate=__A , padding_value=__A , **__A , )
__a = spectrogram_length
__a = num_channels
__a = patch_size
__a = feature_size // self.patch_size[1]
__a = n_fft
__a = sampling_rate // hop_length_to_sampling_rate
__a = sampling_rate
__a = padding_value
__a = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__A , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=__A , norm="""slaney""" , mel_scale="""slaney""" , ).T
def snake_case_ ( self , __A ):
__a = spectrogram(
__A , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
__a = log_spec[:, :-1]
__a = log_spec - 20.0
__a = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , __A , __A = None , __A = True , __A = None , __A = False , __A = False , **__A , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__a = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__a = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
__a = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__a = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __A ):
__a = [np.asarray(__A , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__a = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__a = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__a = np.array(__A ).astype(np.floataa )
# convert into correct format for padding
__a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__a = np.ones([len(__A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__a = padded_audio_features * self.padding_value
for i in range(len(__A ) ):
__a = audio_features[i]
__a = feature
# return as BatchFeature
if return_attention_mask:
__a = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
__a = {"""audio_values""": padded_audio_features}
__a = BatchFeature(data=__A , tensor_type=__A )
return encoded_inputs
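# Usage sketch (added; keyword names follow the released TVLT API and are
# assumptions insofar as the obfuscated signature above hides them).
import numpy as np
from transformers import TvltFeatureExtractor

_fe = TvltFeatureExtractor()
_out = _fe(
    np.random.randn(44_100).astype(np.float32),  # one second of mono audio
    sampling_rate=44_100,
    return_attention_mask=True,
    return_tensors="np",
)
print(_out["audio_values"].shape, _out["audio_mask"].shape)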
| 99 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
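# Usage sketch (added). The obfuscated `LayoutLMvaImageProcessor` corresponds to
# the public `LayoutLMv3ImageProcessor`; Tesseract must be installed for
# `apply_ocr=True`, as the OCR test above requires.
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

_ip = LayoutLMv3ImageProcessor(apply_ocr=True)
_enc = _ip(Image.new("RGB", (640, 480), "white"), return_tensors="pt")
print(_enc.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
print(_enc.words, _enc.boxes)   # OCR'd words and normalized boxes per image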
| 697 | 0 |