code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
"""Lazy-import bootstrap for the ConvBERT model family.

Submodules are registered in ``_import_structure`` and only imported on first
attribute access via ``_LazyModule``; optional backends (tokenizers, PyTorch,
TensorFlow) are skipped when their dependency is missing.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Always-available submodules: configuration and the slow tokenizer.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

# PyTorch backend.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

# TensorFlow backend.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror of the lazy structure so static type checkers see real imports.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attribute access imports for real.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 |
"""Indian mobile phone number validator."""
import re


def lowercase_(__A: str) -> bool:
    """Return True if ``__A`` is a valid Indian mobile number.

    Accepts an optional ``+91`` country prefix (optionally followed by a dash
    or space), an optional leading ``0`` or ``91``, then a ten-digit number
    starting with 7, 8 or 9.
    """
    pattern = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    # The ^...$ anchors already force a whole-string match, so a successful
    # search is sufficient — no need to compare match.string afterwards.
    return pattern.search(__A) is not None


if __name__ == "__main__":
    print(lowercase_("+918827897895"))
| 8 | 1 |
"""Lazy-import bootstrap for the MBart model family.

Submodules are registered in ``_import_structure`` and only imported on first
attribute access via ``_LazyModule``; optional backends (sentencepiece,
tokenizers, PyTorch, TensorFlow, Flax) are skipped when missing.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Configuration never needs an optional dependency.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

# Slow tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

# PyTorch backend.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

# TensorFlow backend.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

# Flax backend.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror of the lazy structure so static type checkers see real imports.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attribute access imports for real.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """Builds a tiny RoFormer config plus random inputs and checks the output
    shapes produced by every TF RoFormer head.

    NOTE(review): restored from the mangled source — locals were assigned where
    ``self`` attributes are read, every method shared the name ``A__`` (so only
    the last survived), and duplicate parameter names made the original a
    SyntaxError. The sibling test class calls this class and these method names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # The original hard-codes every value rather than using the constructor
        # arguments; that behaviour is preserved here.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return a config plus random ids, masks and labels for one batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        # Exercise both the list and the dict calling conventions.
        inputs_list = [input_ids, input_mask]
        result = model(inputs_list)
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        # Duplicate every input across the choice dimension.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common shape/config tests for the TF RoFormer model family.

    NOTE(review): restored from mangled source — the bases were the undefined
    name ``__A``, the class attributes had lost their mixin-required names, and
    every test method collided on ``A__`` so only one would have run.
    """

    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Skip the text-generation pipeline tests for this model.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained Chinese RoFormer checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """Checks the sinusoidal positional embedding values against references."""

    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emba(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Checks rotary position embedding application on query/key layers.

    NOTE(review): ``tf.floataa`` in the mangled source restored to ``tf.float32``.
    """

    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # Deterministic query/key tensors: 0, 0.01, 0.02, ... (keys negated).
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
| 8 | 1 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
# Vertex type parameter; the graph class below refers to it as ``T``.
T = TypeVar('T')
SCREAMING_SNAKE_CASE = T  # legacy alias kept for backward compatibility
class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph supporting both directed and undirected modes.

    NOTE(review): restored from mangled source — the constructor assigned
    locals instead of ``self`` attributes and ``add_edge`` had duplicate
    parameter names (a SyntaxError); the class's own return annotation names
    it ``GraphAdjacencyList``.
    """

    def __init__(self, directed: bool = True) -> None:
        # vertex -> list of adjacent vertices
        self.adj_list: dict[T, list[T]] = {}
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect two vertices, creating missing vertices on the fly.

        Undirected graphs get the edge in both adjacency lists; directed
        graphs only record source -> destination. Returns ``self`` so calls
        can be chained.
        """
        if not self.directed:  # For undirected graphs
            # Both endpoints already exist: record the edge in both lists.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # Only the source exists: link it, then create the destination
            # with the source as its first neighbour.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # Only the destination exists: link it, then create the source
            # with the destination as its first neighbour.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create both, each pointing at the other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # Both endpoints exist: record the edge on the source only.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # Only the source exists: link it and create an isolated destination.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # Only the destination exists: create the source pointing at it.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create the source with the edge and an isolated
            # destination.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
| 8 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_(ProcessorMixin):
    """Processor combining a LayoutLMv2 image processor with a LayoutXLM tokenizer.

    Runs OCR (optionally) via the image processor, then tokenizes the words and
    boxes, returning a single :class:`BatchEncoding` that also carries the
    pixel values under the ``"image"`` key.

    NOTE(review): restored from mangled source — the base class was the
    undefined name ``__A`` and the ProcessorMixin class attributes and most
    local/parameter names had been destroyed.
    """

    # Attribute names required by ProcessorMixin.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated spelling of `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Preprocess images (plus optional words/boxes/labels) into model inputs."""
        # Verify the input combination is compatible with the OCR setting.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            # Repeat each image once per overflowing chunk of its text.
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Return one image per encoded chunk, following the overflow mapping."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 8 | 1 |
'''simple docstring'''
def lowercase_ ( __A : int = 5_0 ) -> int:
"""simple docstring"""
lowercase : str =[1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 8 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    '''Project Euler 3: return the largest prime factor of `n`.

    Raises:
        TypeError: if `n` cannot be cast to int.
        ValueError: if the value is smaller than one.

    Fixes: the trial-division counter and the running answer were bound to a
    throwaway local (NameError), and the function returned its input instead
    of the computed factor; restored as `solution` to match the guard below.
    '''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    ans = 1
    i = 2
    # Trial division: strip each factor completely before advancing, so only
    # primes are ever recorded in `ans`.
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    # Whatever remains above 1 is itself prime and is the largest factor.
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
    # Self-reporting print of the Project Euler answer, e.g. "solution() = ...".
    # NOTE(review): `solution` must be the name of the function defined above;
    # in this machine-renamed revision it was left as `lowercase_` — verify.
    print(f"""{solution() = }""")
| 8 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class UpperCAmelCase_ ( PretrainedConfig ):
    '''Configuration for ImageGPT: a GPT-2-style decoder over 32x32 images
    quantised to 512 colour clusters (vocab = 512 clusters + 1 SOS token).

    Fixes: the base class was the undefined placeholder `__A` (now the
    `PretrainedConfig` imported above), the `__init__` parameters were
    duplicate placeholder names (a SyntaxError), and every attribute was
    assigned to a throwaway local instead of `self`. Parameter names are
    reconstructed from the right-hand sides of the original assignments.
    '''

    model_type = '''imagegpt'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self ,
        vocab_size=512 + 1 ,
        n_positions=32 * 32 ,
        n_embd=512 ,
        n_layer=24 ,
        n_head=8 ,
        n_inner=None ,
        activation_function='''quick_gelu''' ,
        resid_pdrop=0.1 ,
        embd_pdrop=0.1 ,
        attn_pdrop=0.1 ,
        layer_norm_epsilon=1e-5 ,
        initializer_range=0.02 ,
        scale_attn_weights=True ,
        use_cache=True ,
        scale_attn_by_inverse_layer_idx=False ,
        reorder_and_upcast_attn=False ,
        tie_word_embeddings=False ,
        **kwargs ,
    ):
        '''Store the hyper-parameters; defaults correspond to the small
        ImageGPT checkpoints (512 colour clusters + SOS, 32*32 positions).'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class UpperCAmelCase_ ( OnnxConfig ):
    '''ONNX export configuration for ImageGPT.

    Fixes: the base class was the undefined placeholder `__A` (now the
    `OnnxConfig` imported above) and the dummy-input method's parameters
    were duplicate placeholder names (a SyntaxError).
    '''

    @property
    def A__( self ) -> Mapping[str, Mapping[int, str]]:
        '''Input spec: `input_ids` with dynamic batch and sequence axes.'''
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
            ] )

    def A__( self , preprocessor : "FeatureExtractionMixin" , batch_size : int = 1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 32 , image_height : int = 32 , ) -> Mapping[str, Any]:
        '''Build dummy inputs by generating random images and running them
        through `preprocessor`.

        NOTE(review): both methods in this machine-renamed revision are named
        ``A__``, so this def shadows the property above — kept as-is to match
        the file's renaming convention.
        '''
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
| 8 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    '''Return u * (u-1) * ... * (u-p+1), the factorial-style term of Newton's
    forward-difference interpolation formula (returns `u` when p <= 1).

    Fix: the previous revision declared two parameters with the same
    placeholder name (a SyntaxError) and accumulated into an unbound name;
    the call sites below use the names `ucal` and `main`, which are restored.
    '''
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    '''Interactively read sample points and print the Newton forward-difference
    interpolation at a requested value.'''
    n = int(input('''enter the numbers of values: ''' ) )
    # n x n forward-difference table; column 0 will hold the sampled values
    y: list[list[float]] = []
    for _ in range(n):
        y.append([] )
    for i in range(n):
        for j in range(n):
            y[i].append(0 )
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int, input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}' )


if __name__ == "__main__":
    main()
| 8 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( PretrainedConfig ):
    '''Swin Transformer V2 configuration (defaults follow
    microsoft/swinv2-tiny-patch4-window8-256).

    Fixes: the base class was the undefined placeholder `__A` (now the
    `PretrainedConfig` imported above), the `__init__` parameters were
    duplicate placeholder names (a SyntaxError), and every attribute was
    assigned to a throwaway local instead of `self`. Parameter names are
    reconstructed from the right-hand sides of the original assignments.
    '''

    model_type = '''swinv2'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self ,
        image_size=224 ,
        patch_size=4 ,
        num_channels=3 ,
        embed_dim=96 ,
        depths=[2, 2, 6, 2] ,
        num_heads=[3, 6, 12, 24] ,
        window_size=7 ,
        mlp_ratio=4.0 ,
        qkv_bias=True ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        drop_path_rate=0.1 ,
        hidden_act='''gelu''' ,
        use_absolute_embeddings=False ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-5 ,
        encoder_stride=32 ,
        **kwargs ,
    ):
        '''Store the hyper-parameters; see the class docstring for defaults.'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 8 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
    '''Tests for `AutoImageProcessor.from_pretrained` resolution, registration
    and `trust_remote_code` handling.

    NOTE(review): this revision is machine-renamed — every method is named
    ``A__`` (later defs shadow earlier ones, so unittest will not collect
    them individually) and many call arguments were collapsed to the unbound
    placeholder ``UpperCAmelCase``. Code is kept verbatim; docstrings below
    describe the apparent intent only — confirm against upstream.
    '''

    def A__ ( self : int ) -> Any:
        '''Per-test setup: resets a counter/timeout to 0 (the assignment
        target was lost in the renaming).'''
        lowercase : Union[str, Any] =0

    def A__ ( self : str ) -> Union[str, Any]:
        '''Load a CLIP image processor from the Hub by repo id.'''
        lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : Any ) -> int:
        '''Load from a local dir with preprocessor_config.json + config.json.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : Any ) -> Union[str, Any]:
        '''Load when the local dir only has a legacy feature-extractor config.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : Optional[Any] ) -> Any:
        '''config.json alone should be enough to resolve the processor class.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : str =CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
            config_dict.pop('''image_processor_type''' )
            lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
            # save in new folder
            model_config.save_pretrained(UpperCAmelCase )
            config.save_pretrained(UpperCAmelCase )
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            # make sure private variable is not incorrectly saved
            lowercase : int =json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : str ) -> List[str]:
        '''Load from a dir containing only preprocessor_config.json.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : int ) -> List[str]:
        '''An invalid repo id raises a helpful error.'''
        with self.assertRaisesRegex(
            UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
            lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )

    def A__ ( self : List[Any] ) -> List[str]:
        '''An invalid revision raises a helpful error.'''
        with self.assertRaisesRegex(
            UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )

    def A__ ( self : Tuple ) -> Optional[int]:
        '''A repo without preprocessor_config.json raises a helpful error.'''
        with self.assertRaisesRegex(
            UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )

    def A__ ( self : List[str] ) -> str:
        '''trust_remote_code gating for a Hub-defined image processor.'''
        with self.assertRaises(UpperCAmelCase ):
            lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCAmelCase ):
            lowercase : List[str] =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(UpperCAmelCase )
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )

    def A__ ( self : Union[str, Any] ) -> Dict:
        '''Register a custom config/processor pair and round-trip it.'''
        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCAmelCase ):
                AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
                lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
                lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(UpperCAmelCase )
                lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
                self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def A__ ( self : Any ) -> Any:
        '''A locally-registered processor wins unless remote code is requested.'''

        class UpperCAmelCase_ ( __A ):
            '''Inner stand-in processor; `is_local` marks the local code path.'''

            UpperCamelCase_ = True

        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # If remote code is not set, the default is to use local
            lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            lowercase : Tuple =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            lowercase : Dict =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
SCREAMING_SNAKE_CASE = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    '''Create a random int32 array of `shape` with values in [0, vocab_size).

    Fixes: duplicate placeholder parameter names (a SyntaxError), unbound
    locals (`total_dims`, `values`, `output`) and the non-existent dtype
    `jnp.intaa` (now `np.int32`); restored as `ids_tensor`, the name used by
    the caller below.
    '''
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=np.int32 ).reshape(shape )
    return output
def random_attention_mask(shape, rng=None):
    '''Random 0/1 attention mask of `shape`; the last column is forced to 1 so
    every batch row attends to at least one token.

    Fixes: duplicate placeholder parameter names (a SyntaxError) and the mask
    bound to a throwaway local before `return attn_mask`.
    '''
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class UpperCAmelCase_ :
    '''Mixin of generation tests shared by Flax generative-model test classes.

    NOTE(review): this revision is machine-renamed — every method is named
    ``A__`` (later defs shadow earlier ones) and many arguments/flags were
    collapsed to the unbound placeholder ``UpperCAmelCase``. Code is kept
    verbatim; docstrings describe the apparent intent only.
    '''

    # Concrete test classes are expected to set a model tester and the tuple
    # of generative model classes under test.
    UpperCamelCase_ = None
    UpperCamelCase_ = ()

    def A__ ( self : Union[str, Any] ) -> Tuple:
        '''Build (config, input_ids, attention_mask, max_length) for the
        generation tests: batch capped, sequence halved, +5 tokens to generate.'''
        lowercase , lowercase : str =self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        lowercase : Dict =2
        lowercase : List[str] =inputs['''input_ids'''].shape[-1] // 2
        lowercase : Union[str, Any] =inputs['''input_ids'''][:max_batch_size, :sequence_length]
        lowercase : List[str] =jnp.ones_like(UpperCAmelCase )
        lowercase : Optional[Any] =attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        lowercase : List[Any] =input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            lowercase : Union[str, Any] =config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def A__ ( self : Optional[int] ) -> List[Any]:
        '''Greedy-generation parity between each Flax model and its PT twin.'''
        lowercase , lowercase , lowercase , lowercase : int =self._get_input_ids_and_config()
        lowercase : Tuple =False
        lowercase : Tuple =max_length
        lowercase : Any =0
        for model_class in self.all_generative_model_classes:
            lowercase : List[Any] =model_class(UpperCAmelCase )
            lowercase : int =model_class.__name__[4:]  # Skip the "Flax" at the beginning
            lowercase : List[Any] =getattr(UpperCAmelCase , UpperCAmelCase )
            lowercase : str =pt_model_class(UpperCAmelCase ).eval()
            lowercase : Tuple =load_flax_weights_in_pytorch_model(UpperCAmelCase , flax_model.params )
            lowercase : Any =flax_model.generate(UpperCAmelCase ).sequences
            lowercase : int =pt_model.generate(torch.tensor(UpperCAmelCase , dtype=torch.long ) )
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                lowercase : Optional[int] =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )

    def A__ ( self : List[Any] ) -> int:
        '''Greedy generate agrees with its jit-compiled version.'''
        lowercase , lowercase , lowercase , lowercase : Union[str, Any] =self._get_input_ids_and_config()
        lowercase : Union[str, Any] =False
        lowercase : Any =max_length
        for model_class in self.all_generative_model_classes:
            lowercase : Dict =model_class(UpperCAmelCase )
            lowercase : Optional[Any] =model.generate(UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase )
            lowercase : Union[str, Any] =jit(model.generate )
            lowercase : Optional[int] =jit_generate(UpperCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def A__ ( self : Any ) -> int:
        '''Sampled generate agrees with its jit-compiled version.'''
        lowercase , lowercase , lowercase , lowercase : List[Any] =self._get_input_ids_and_config()
        lowercase : List[Any] =True
        lowercase : Tuple =max_length
        for model_class in self.all_generative_model_classes:
            lowercase : str =model_class(UpperCAmelCase )
            lowercase : int =model.generate(UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase )
            lowercase : List[Any] =jit(model.generate )
            lowercase : Dict =jit_generate(UpperCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def A__ ( self : Union[str, Any] ) -> int:
        '''Beam search (2 beams) agrees with its jit-compiled version.'''
        lowercase , lowercase , lowercase , lowercase : List[Any] =self._get_input_ids_and_config()
        lowercase : Optional[Any] =False
        lowercase : List[str] =max_length
        lowercase : List[str] =2
        for model_class in self.all_generative_model_classes:
            lowercase : Union[str, Any] =model_class(UpperCAmelCase )
            lowercase : int =model.generate(UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase )
            lowercase : Tuple =jit(model.generate )
            lowercase : Optional[Any] =jit_generate(UpperCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def A__ ( self : int ) -> Union[str, Any]:
        '''num_return_sequences scales the output batch dimension.'''
        lowercase , lowercase , lowercase , lowercase : List[str] =self._get_input_ids_and_config()
        lowercase : int =False
        lowercase : List[Any] =max_length
        lowercase : str =2
        lowercase : Optional[Any] =2
        for model_class in self.all_generative_model_classes:
            lowercase : Any =model_class(UpperCAmelCase )
            lowercase : Optional[int] =model.generate(UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )

    def A__ ( self : Union[str, Any] ) -> str:
        '''Sampling with temperature/top-k/top-p and forced token ids agrees
        with its jit-compiled version.'''
        lowercase , lowercase , lowercase , lowercase : Any =self._get_input_ids_and_config()
        lowercase : int =True
        lowercase : Any =max_length
        lowercase : str =0.8
        lowercase : List[str] =10
        lowercase : Any =0.3
        lowercase : str =1
        lowercase : Union[str, Any] =8
        lowercase : Optional[Any] =9
        for model_class in self.all_generative_model_classes:
            lowercase : str =model_class(UpperCAmelCase )
            lowercase : Union[str, Any] =model.generate(UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase )
            lowercase : int =jit(model.generate )
            lowercase : List[Any] =jit_generate(UpperCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def A__ ( self : str ) -> Union[str, Any]:
        '''Greedy generation with forced bos/eos token ids agrees with jit.'''
        lowercase , lowercase , lowercase , lowercase : Dict =self._get_input_ids_and_config()
        lowercase : Dict =max_length
        lowercase : Optional[int] =1
        lowercase : Union[str, Any] =8
        lowercase : Dict =9
        for model_class in self.all_generative_model_classes:
            lowercase : int =model_class(UpperCAmelCase )
            lowercase : Optional[Any] =model.generate(UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase )
            lowercase : int =jit(model.generate )
            lowercase : Tuple =jit_generate(UpperCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def A__ ( self : Tuple ) -> Optional[int]:
        '''Beam search with forced bos/eos token ids agrees with jit.'''
        lowercase , lowercase , lowercase , lowercase : Any =self._get_input_ids_and_config()
        lowercase : Any =max_length
        lowercase : int =2
        lowercase : Any =1
        lowercase : List[Any] =8
        lowercase : Dict =9
        for model_class in self.all_generative_model_classes:
            lowercase : int =model_class(UpperCAmelCase )
            lowercase : List[Any] =model.generate(UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase )
            lowercase : str =jit(model.generate )
            lowercase : Dict =jit_generate(UpperCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def A__ ( self : int ) -> Optional[Any]:
        '''Greedy generation with a left-padded attention mask agrees with jit.'''
        lowercase , lowercase , lowercase , lowercase : List[str] =self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase : List[Any] =attention_mask.at[(0, 0)].set(0 )
        lowercase : List[Any] =False
        lowercase : Any =max_length
        for model_class in self.all_generative_model_classes:
            lowercase : Any =model_class(UpperCAmelCase )
            lowercase : Tuple =model.generate(UpperCAmelCase , attention_mask=UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase )
            lowercase : Optional[Any] =jit(model.generate )
            lowercase : Union[str, Any] =jit_generate(UpperCAmelCase , attention_mask=UpperCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def A__ ( self : Any ) -> Any:
        '''Sampling with a left-padded attention mask agrees with jit.'''
        lowercase , lowercase , lowercase , lowercase : Optional[int] =self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase : List[str] =attention_mask.at[(0, 0)].set(0 )
        lowercase : str =True
        lowercase : Any =max_length
        for model_class in self.all_generative_model_classes:
            lowercase : int =model_class(UpperCAmelCase )
            lowercase : Union[str, Any] =model.generate(UpperCAmelCase , attention_mask=UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase )
            lowercase : Union[str, Any] =jit(model.generate )
            lowercase : Optional[Any] =jit_generate(UpperCAmelCase , attention_mask=UpperCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def A__ ( self : Union[str, Any] ) -> Optional[int]:
        '''Beam search with a left-padded attention mask agrees with jit.'''
        lowercase , lowercase , lowercase , lowercase : Any =self._get_input_ids_and_config()
        # pad attention mask on the left
        lowercase : int =attention_mask.at[(0, 0)].set(0 )
        lowercase : Union[str, Any] =2
        lowercase : int =max_length
        for model_class in self.all_generative_model_classes:
            lowercase : Union[str, Any] =model_class(UpperCAmelCase )
            lowercase : List[Any] =model.generate(UpperCAmelCase , attention_mask=UpperCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase )
            lowercase : List[Any] =jit(model.generate )
            lowercase : Any =jit_generate(UpperCAmelCase , attention_mask=UpperCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
    '''Checks that `generate` rejects unknown keyword arguments.

    NOTE(review): machine-renamed revision — several call arguments below
    were collapsed to the unbound placeholder ``UpperCAmelCase``; kept
    verbatim.
    '''

    def A__ ( self : Tuple ) -> List[str]:
        '''A typo (`do_samples`) or an arbitrary kwarg (`foo`) raises with a
        message naming the offending argument.'''
        lowercase : List[Any] =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
        lowercase : int =FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        lowercase : Optional[Any] ='''Hello world'''
        lowercase : Optional[int] =tokenizer(UpperCAmelCase , return_tensors='''np''' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(UpperCAmelCase , '''do_samples''' ):
            model.generate(UpperCAmelCase , do_samples=UpperCAmelCase )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(UpperCAmelCase , '''foo''' ):
            lowercase : Optional[int] ={'''foo''': '''bar'''}
            model.generate(UpperCAmelCase , **UpperCAmelCase )
| 8 |
'''simple docstring'''
from __future__ import annotations
SCREAMING_SNAKE_CASE = 8.988E9  # Coulomb's constant, units = N * m^2 * C^-2


def lowercase_(force: float, charge_a: float, charge_b: float, distance: float) -> dict[str, float]:
    '''Solve Coulomb's law for whichever of the four quantities is given as 0.

    Exactly one argument must be 0; that quantity is computed from the other
    three and returned in a single-entry dict keyed "force", "charge1",
    "charge2" or "distance".

    Fixes: all four parameters shared one placeholder name (a SyntaxError)
    and the body referenced unbound names; parameter names reconstructed
    from the return keys.

    >>> lowercase_(0, 3, 5, 2000)
    {'force': 33705.0}
    '''
    charge_product = abs(charge_a * charge_b)
    if (force, charge_a, charge_b, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if distance < 0:
        raise ValueError('''Distance cannot be negative''' )
    if force == 0:
        force = SCREAMING_SNAKE_CASE * charge_product / (distance**2)
        return {"force": force}
    elif charge_a == 0:
        charge_a = abs(force ) * (distance**2) / (SCREAMING_SNAKE_CASE * charge_b)
        return {"charge1": charge_a}
    elif charge_b == 0:
        charge_b = abs(force ) * (distance**2) / (SCREAMING_SNAKE_CASE * charge_a)
        return {"charge2": charge_b}
    elif distance == 0:
        distance = (SCREAMING_SNAKE_CASE * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    '''Return every k-element combination of {1, ..., n} in lexicographic order.

    Fixes: duplicate placeholder parameter names (a SyntaxError) and the
    accumulator bound to a throwaway local; restored as
    `generate_all_combinations`, the name the __main__ guard below calls.
    '''
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    '''Backtracking helper: extend `current_list` with values from `increment`
    upward until `level` more picks remain, appending each completed
    combination (as a copy) to `total_list`.

    Fixes: all five parameters shared one placeholder name (a SyntaxError)
    and the recursive call targeted an undefined function; restored as
    `create_all_state`, the name the body and callers use.
    '''
    if level == 0:
        total_list.append(current_list[:] )
        return
    # highest usable start value still leaves `level - 1` larger values free
    for i in range(increment, total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1, total_number, level - 1, current_list, total_list )
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    '''Print one combination per line, space-separated.

    Fix: restored as `print_all_state`, the name the __main__ guard calls.
    '''
    for combination in total_list:
        print(*combination )
if __name__ == "__main__":
    # Demo: print all 2-element combinations drawn from {1, ..., 4}.
    # Fix: the three results were all assigned to one constant name while the
    # calls read `n`, `k` and `total_list` (NameErrors); names now match.
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 8 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> Dict[int, str]:
    '''Map every byte 0-255 to a printable unicode character (the GPT-2 byte
    encoder): printable bytes map to themselves, the rest to chr(256 + k).

    Fixes: the companion list `cs`, counter `n`, the `chr` argument and the
    final `zip` all referenced unbound placeholder names; the function is
    restored to its upstream name (the previous `lowercase_` was shadowed by
    the next definition).
    '''
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word):
    '''Return the set of adjacent symbol pairs in `word` (a string or tuple of
    symbols), as used by byte-pair-encoding merges.

    Fixes: the accumulator set and previous-character tracker were bound to a
    throwaway local (NameError on use); restored to the upstream name so it
    no longer shadows `bytes_to_unicode` (both were `lowercase_`).
    '''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase_(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED (same algorithm as BART/GPT-2).

    NOTE(review): the original obfuscation gave every method the same name
    (``A__``), every parameter the same name (``UpperCAmelCase`` — a
    SyntaxError), assigned all four class attributes to one name, and used the
    undefined ``__A`` as the base class.  The methods are restored here to the
    ``PreTrainedTokenizer`` API their bodies visibly implement.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        """Load the vocabulary/merges files and build the byte<->unicode tables.

        Args:
            vocab_file: path to ``vocab.json`` (token -> id).
            merges_file: path to ``merges.txt`` (ranked BPE merges).
            errors: how ``bytes.decode`` handles errors when detokenizing.
            add_prefix_space: whether to prepend a space to the first word so
                it is tokenized like any other (non-leading) word.
        """
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        # Earlier merges have lower rank (= higher priority).
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return token -> id mapping including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized (byte-encoded) token.

        Returns the merged symbols joined by single spaces; results are
        memoized in ``self.cache``.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (earliest-learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split *text* with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''') )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id; unknown tokens map to ``unk_token``'s id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string (``None`` if unknown)."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and decode the byte-level encoding back to text."""
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` and ``merges.txt`` into *save_directory*.

        Returns the two file paths, or nothing if the directory is invalid.
        """
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            # Merges must be written in rank order; warn if ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None) -> List[int]:
        """Build model inputs: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True)
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids (LED, like BART, does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word tokenizes like later words."""
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad like the base class, then pad ``global_attention_mask`` to match.

        LED carries an extra ``global_attention_mask`` input that the generic
        ``_pad`` does not know about, so it is padded here with ``-1``.
        """
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask''']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['''global_attention_mask'''])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side))
        return encoded_inputs
| 8 | 1 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def lowercase_ ( __A : str , __A : Any ) -> Union[str, Any]:
"""simple docstring"""
if os.path.exists(__A ):
if os.path.exists(os.path.join(__A , '''config.json''' ) ) and os.path.isfile(
os.path.join(__A , '''config.json''' ) ):
os.remove(os.path.join(__A , '''config.json''' ) )
if os.path.exists(os.path.join(__A , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(__A , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(__A , '''pytorch_model.bin''' ) )
else:
os.makedirs(__A )
model.save_pretrained(__A )
def lowercase_ ( __A : int , __A : Any=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] =2
if unlogit:
lowercase : str =torch.pow(__A , __A )
lowercase : str =p * torch.log(__A )
lowercase : List[str] =0
return -plogp.sum(dim=-1 )
def lowercase_ ( __A : List[str] ) -> int:
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(F'{x + 1}' for x in range(len(__A ) ) ) )
for row in range(len(__A ) ):
if tensor.dtype != torch.long:
logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:d}' for x in tensor[row].cpu().data ) )
def lowercase_ ( __A : Tuple , __A : Union[str, Any] , __A : List[str] , __A : Optional[Any]=True , __A : int=True , __A : List[Any]=None , __A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase , lowercase : Dict =model.config.num_hidden_layers, model.config.num_attention_heads
lowercase : Optional[int] =torch.zeros(__A , __A ).to(args.device )
lowercase : Dict =torch.zeros(__A , __A ).to(args.device )
if head_mask is None:
lowercase : List[Any] =torch.ones(__A , __A ).to(args.device )
head_mask.requires_grad_(requires_grad=__A )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
lowercase : int =None
lowercase : str =0.0
lowercase : Dict =0.0
for step, inputs in enumerate(tqdm(__A , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
lowercase : Dict =tuple(t.to(args.device ) for t in inputs )
((lowercase) , ) : int =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowercase : Union[str, Any] =model(__A , labels=__A , head_mask=__A )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowercase , lowercase , lowercase : int =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__A ):
lowercase : Any =entropy(attn.detach() , __A )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__A ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowercase : Tuple =2
lowercase : Any =torch.pow(torch.pow(__A , __A ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
lowercase : List[str] =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(__A )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(__A )
logger.info('''Head ranked by importance scores''' )
lowercase : Any =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
lowercase : Union[str, Any] =torch.arange(
head_importance.numel() , device=args.device )
lowercase : int =head_ranks.view_as(__A )
print_ad_tensor(__A )
return attn_entropy, head_importance, total_loss
def lowercase_ ( __A : Any , __A : Dict , __A : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase , lowercase , lowercase : List[Any] =compute_heads_importance(__A , __A , __A , compute_entropy=__A )
lowercase : Tuple =1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , __A , original_score * args.masking_threshold )
lowercase : Tuple =torch.ones_like(__A )
lowercase : str =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
lowercase : Any =original_score
while current_score >= original_score * args.masking_threshold:
lowercase : Optional[int] =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
lowercase : Dict =float('''Inf''' )
lowercase : List[Any] =head_importance.view(-1 ).sort()[1]
if len(__A ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
lowercase : List[Any] =current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
lowercase : Optional[int] =new_head_mask.view(-1 )
lowercase : Optional[Any] =0.0
lowercase : Dict =new_head_mask.view_as(__A )
lowercase : Any =new_head_mask.clone().detach()
print_ad_tensor(__A )
# Compute metric and head importance again
lowercase , lowercase , lowercase : List[Any] =compute_heads_importance(
__A , __A , __A , compute_entropy=__A , head_mask=__A )
lowercase : Optional[int] =1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('''Final head mask''' )
print_ad_tensor(__A )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowercase_ ( __A : Dict , __A : int , __A : Dict , __A : int ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] =datetime.now()
lowercase , lowercase , lowercase : int =compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A )
lowercase : Tuple =1 / loss
lowercase : Union[str, Any] =datetime.now() - before_time
lowercase : Dict =sum(p.numel() for p in model.parameters() )
lowercase : Tuple ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__A ) )
}
for k, v in heads_to_prune.items():
if isinstance(__A , __A ):
lowercase : Optional[int] =[
v,
]
assert sum(len(__A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__A )
lowercase : str =sum(p.numel() for p in model.parameters() )
lowercase : Optional[Any] =datetime.now()
lowercase , lowercase , lowercase : List[str] =compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A , actually_pruned=__A , )
lowercase : Any =1 / loss
lowercase : List[str] =datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __A , __A , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __A , __A )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 1_0_0 )
save_model(__A , args.output_dir )
def lowercase_ ( ) -> List[str]:
"""simple docstring"""
lowercase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__A , type=__A , required=__A , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__A , type=__A , required=__A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=__A , type=__A , required=__A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=__A , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=__A , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=__A , type=__A , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=__A , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=__A , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=__A , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=__A , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=__A , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=__A , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=__A , default=4_2 )
parser.add_argument('''--local_rank''' , type=__A , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=__A , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=__A , default='''''' , help='''Can be used for distant debugging.''' )
lowercase : str =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__A )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowercase : Optional[Any] =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
lowercase : Dict =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowercase : Dict =torch.device('''cuda''' , args.local_rank )
lowercase : Dict =1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowercase : int =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowercase : Union[str, Any] =nn.parallel.DistributedDataParallel(
__A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__A )
elif args.n_gpu > 1:
lowercase : Any =nn.DataParallel(__A )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__A )
torch.save(__A , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , __A )
# Prepare dataset
lowercase : Union[str, Any] =np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowercase : Dict =(torch.from_numpy(__A ),)
lowercase : Dict =TensorDataset(*__A )
lowercase : Optional[Any] =RandomSampler(__A )
lowercase : List[Any] =DataLoader(__A , sampler=__A , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__A , __A , __A )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowercase : Tuple =mask_heads(__A , __A , __A )
prune_heads(__A , __A , __A , __A )
if __name__ == "__main__":
main()
| 8 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =list(poly_a or [0] )[:]
lowercase : Dict =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : int =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : List[str] =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : Tuple =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Optional[int] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : str =self.__multiply()
def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase : Tuple =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase ) <= 1:
return dft[0]
#
lowercase : List[Any] =self.c_max_length // 2
while next_ncol > 0:
lowercase : str =[[] for i in range(UpperCAmelCase )]
lowercase : List[str] =self.root**next_ncol
# First half of next step
lowercase : Union[str, Any] =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : Any =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Tuple =new_dft
lowercase : List[Any] =next_ncol // 2
return dft[0]
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.__dft('''A''' )
lowercase : Union[str, Any] =self.__dft('''B''' )
lowercase : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Tuple =2
while next_ncol <= self.c_max_length:
lowercase : Tuple =[[] for i in range(UpperCAmelCase )]
lowercase : Tuple =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : List[str] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''A = ''' + ''' + '''.join(
f'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) )
lowercase : List[str] ='''B = ''' + ''' + '''.join(
f'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) )
lowercase : Optional[Any] ='''A*B = ''' + ''' + '''.join(
f'{coef}*x^{i}' for coef, i in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 |
'''simple docstring'''
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : List[Any] =str(bin(__A ) )
binary_number += "0" * shift_amount
return binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : Union[str, Any] =str(bin(__A ) )[2:]
if shift_amount >= len(__A ):
return "0b0"
lowercase : Any =binary_number[: len(__A ) - shift_amount]
return "0b" + shifted_binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
lowercase : str ='''0''' + str(bin(__A ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase : Dict =len(bin(__A )[3:] ) # Find 2's complement of number
lowercase : Optional[Any] =bin(abs(__A ) - (1 << binary_number_length) )[3:]
lowercase : int =(
'''1''' + '''0''' * (binary_number_length - len(__A )) + binary_number
)
if shift_amount >= len(__A ):
return "0b" + binary_number[0] * len(__A )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__A ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 1 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowercase_ ( __A : str = "AAPL" ) -> str:
"""simple docstring"""
lowercase : int =F'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
lowercase : List[Any] =BeautifulSoup(requests.get(__A ).text , '''html.parser''' )
lowercase : Union[str, Any] ='''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 8 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
    """Placeholder image loader used when vision deps are absent.

    Ignores its argument and always returns None.
    """
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# NOTE(review): the tests below reference this constant as `INVOICE_URL`, but
# it is bound here under the mangled name `SCREAMING_SNAKE_CASE` — confirm the
# intended module-level name before running.
SCREAMING_SNAKE_CASE = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for the document-question-answering task.

    NOTE(review): this file appears machine-mangled — several signatures reuse
    the parameter name ``UpperCAmelCase`` multiple times (a SyntaxError in
    Python), every method is named ``A__`` (later definitions shadow earlier
    ones), and bodies reference names (``INVOICE_URL``, ``image``,
    ``question``, ``word_boxes``, ``dqa_pipeline``) that are bound under
    different identifiers. Code left byte-identical; only docs added.
    """

    # Task → model mapping consumed by the shared pipeline-test mixin.
    UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
        """Build a document-QA pipeline plus three example inputs: a URL image,
        a pre-loaded image, and an image with pre-computed OCR word boxes."""
        lowercase : Dict =pipeline(
            '''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : Optional[Any] =INVOICE_URL
        # Run OCR once so the third example can supply word boxes directly.
        lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        lowercase : Dict ='''What is the placebo?'''
        lowercase : Optional[Any] =[
            {
                '''image''': load_image(UpperCAmelCase ),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
        """Run the pipeline on the prepared examples and check each of the two
        top answers exposes score/answer/start/end fields."""
        lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
        self.assertEqual(
            UpperCAmelCase , [
                [
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                ]
            ]
            * 3 , )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        """Smoke-test a tiny random LayoutLMv2 checkpoint, including an image
        with no detectable text and explicit words/boxes inputs."""
        lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
        lowercase : Union[str, Any] =INVOICE_URL
        lowercase : Tuple ='''How many cats are there?'''
        lowercase : Optional[int] =[
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )
        # We can optionally pass directly the words and bounding boxes
        lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Dict =[]
        lowercase : str =[]
        lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        """Slow test: LayoutLMv2 fine-tuned on DocVQA — kwargs, dict, and
        batched inputs must all return the pinned answers."""
        lowercase : Union[str, Any] =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
        lowercase : Dict =INVOICE_URL
        lowercase : str ='''What is the invoice number?'''
        lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2 , )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : Dict ) -> List[str]:
        """Same DocVQA checkpoint but with max_seq_len=50, which changes the
        top answer; single and batched calls are checked."""
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
        lowercase : Dict =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Any =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : str ) -> Dict:
        """Slow test: Impira LayoutLM document-QA model; also checks that the
        pipeline accepts image=None when word_boxes are supplied."""
        lowercase : Any =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : Tuple =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
        lowercase : Tuple =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : str =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2 , )
        lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : Dict ) -> Any:
        """Same Impira model with max_seq_len=50; batched calls and the
        image=None + word_boxes path are both covered."""
        lowercase : Dict =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : List[Any] =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
        lowercase : str =INVOICE_URL
        lowercase : int ='''What is the invoice number?'''
        lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Union[str, Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
        lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

    @slow
    @require_torch
    def A__ ( self : List[Any] ) -> Optional[int]:
        """Slow test: Donut (vision-encoder-decoder) document-QA checkpoint —
        only the answer text is checked."""
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
        lowercase : Any =INVOICE_URL
        lowercase : Union[str, Any] ='''What is the invoice number?'''
        lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )

    @require_tf
    @unittest.skip('''Document question answering not implemented in TF''' )
    def A__ ( self : Any ) -> Any:
        """Placeholder: the task has no TensorFlow implementation."""
        pass
| 8 | 1 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
# Emit conversion progress at info level while the script runs.
logging.set_verbosity_info()
# NOTE(review): the functions below reference `logger`, `MAPPING` and
# `TOP_LEVEL_KEYS`, but all three constants are bound here under the mangled
# name `SCREAMING_SNAKE_CASE` (each assignment overwriting the last) — the
# intended module-level names must be restored for the script to run.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Maps fairseq/unilm WavLM parameter-name fragments to HF WavLM module paths.
# A '*' in the destination is replaced by the encoder layer index.
SCREAMING_SNAKE_CASE = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
    'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
    'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# Destinations that live at the top level of the HF model (no module prefix).
SCREAMING_SNAKE_CASE = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def lowercase_ ( hf_pointer , key , value , full_name , weight_type ) -> None:
    """Copy ``value`` into the HF parameter addressed by dotted path ``key``.

    Args:
        hf_pointer: root HF module to descend into.
        key: dotted attribute path (e.g. "encoder.layers.0.attention.k_proj").
        value: tensor to copy in; its shape must match the destination.
        full_name: original fairseq parameter name, used only for messages.
        weight_type: one of "weight", "weight_g", "weight_v", "bias" or None,
            selecting which sub-attribute of the resolved module is written.

    Fixes: the original signature repeated the parameter name ``__A`` five
    times (a SyntaxError), and each branch assigned ``value`` to a throwaway
    local instead of writing the resolved parameter's ``.data``.
    """
    # Walk the dotted path down to the target module/parameter.
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )

    # Shape-check the destination before overwriting anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowercase_ ( fairseq_model , hf_model ) -> None:
    """Copy every weight of ``fairseq_model`` into ``hf_model`` via MAPPING.

    Conv feature-extractor weights are routed through ``load_conv_layer``;
    everything else is matched against MAPPING and written with
    ``set_recursively``. Unmatched weights are collected and logged.

    Fixes: the original signature repeated the parameter name ``__A`` (a
    SyntaxError), split ``name`` on the wrong value (``name.split(__A)``
    instead of ``name.split(key)``), and bound every intermediate result to a
    throwaway ``lowercase`` local.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Match either the raw fairseq key or its un-prefixed form.
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched fragment.
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(F'Unused weights: {unused_weights}' )
def lowercase_ ( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
    """Copy one fairseq conv feature-extractor weight into the HF extractor.

    Args:
        full_name: full fairseq parameter name containing "conv_layers.".
        value: tensor to copy in.
        feature_extractor: HF feature extractor holding ``conv_layers``.
        unused_weights: list mutated in place with names that were not copied.
        use_group_norm: whether the extractor uses group norm (affects which
            layers carry their own layer norm).

    Fixes: the original signature repeated the parameter name ``__A`` (a
    SyntaxError) and each branch discarded ``value`` into a throwaway local
    instead of writing the target parameter's ``.data``.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        # type 0 == the convolution itself.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2 == the per-layer norm (only layer 0 has one under group norm).
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def lowercase_ ( checkpoint_path , pytorch_dump_folder_path , config_path=None ) -> None:
    """Convert a fairseq/unilm WavLM checkpoint to an HF WavLMModel on disk.

    Args:
        checkpoint_path: path to the original fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the HF model.
        config_path: optional path to an HF config.json; a default
            ``WavLMConfig`` is used when omitted.

    Fixes: the original signature repeated the parameter name ``__A`` (a
    SyntaxError) and bound every intermediate object to throwaway locals.
    """
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['''cfg'''] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['''model'''] )
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Fixes: the parser was bound to `SCREAMING_SNAKE_CASE` while the next
    # lines used the undefined names `parser`/`args`, and the final call
    # targeted the undefined name `convert_wavlm_checkpoint` (the conversion
    # function above is bound to `lowercase_`).
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    lowercase_(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 8 |
'''simple docstring'''
def lowercase_ ( number : float , digit_amount : int ) -> float:
    """Isolate the decimal (fractional) part of ``number``.

    Args:
        number: value whose fractional part is wanted.
        digit_amount: when > 0, round the fractional part to that many digits;
            otherwise return it unrounded.

    Returns:
        The fractional part of ``number`` (sign preserved).

    Fix: the original signature named both parameters ``__A`` (a SyntaxError)
    while the body referenced ``number`` and ``digit_amount``; the parameters
    are positional, so callers are unaffected by the rename.
    """
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
    # Fix: the function above is bound to `lowercase_`; the original calls to
    # the undefined name `decimal_isolate` raised NameError at runtime.
    print(lowercase_(1.53, 0))
    print(lowercase_(35.345, 1))
    print(lowercase_(35.345, 2))
    print(lowercase_(35.345, 3))
    print(lowercase_(-14.789, 3))
    print(lowercase_(0, 2))
    print(lowercase_(-14.123, 1))
    print(lowercase_(-14.123, 2))
    print(lowercase_(-14.123, 3))
| 8 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for AutoImageProcessor loading, registration, and remote code.

    NOTE(review): this file appears machine-mangled — every method is named
    ``A__`` (later definitions shadow earlier ones) and the bodies use
    ``UpperCAmelCase`` as a runtime value (e.g. as a directory path, an
    exception class, or a boolean flag) although no such module-level name is
    bound. Code left byte-identical; only documentation added.
    """

    def A__ ( self : int ) -> Any:
        """Presumably a setUp-style hook zeroing a counter (mangled name)."""
        lowercase : Union[str, Any] =0

    def A__ ( self : str ) -> Union[str, Any]:
        """Load an image processor straight from the Hub by model id."""
        lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : Any ) -> int:
        """Load from a local dir whose preprocessor_config.json names the
        image processor class directly."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : Any ) -> Union[str, Any]:
        """Load when the config only names a legacy feature_extractor_type."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : Optional[Any] ) -> Any:
        """Load via the model's config.json alone (image_processor_type
        stripped from the saved processor config)."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : str =CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
            config_dict.pop('''image_processor_type''' )
            lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
            # save in new folder
            model_config.save_pretrained(UpperCAmelCase )
            config.save_pretrained(UpperCAmelCase )
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            # make sure private variable is not incorrectly saved
            lowercase : int =json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : str ) -> List[str]:
        """Load from a dir containing only preprocessor_config.json."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : int ) -> List[str]:
        """An invalid model identifier raises with a helpful message."""
        with self.assertRaisesRegex(
            UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
            lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )

    def A__ ( self : List[Any] ) -> List[str]:
        """An invalid revision raises with a helpful message."""
        with self.assertRaisesRegex(
            UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )

    def A__ ( self : Tuple ) -> Optional[int]:
        """A repo without a preprocessor config raises with a helpful message."""
        with self.assertRaisesRegex(
            UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )

    def A__ ( self : List[str] ) -> str:
        """Remote-code processors: refused without trust_remote_code, loaded
        (and round-trippable through save/load) with it."""
        with self.assertRaises(UpperCAmelCase ):
            lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCAmelCase ):
            lowercase : List[str] =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(UpperCAmelCase )
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )

    def A__ ( self : Union[str, Any] ) -> Dict:
        """Custom config/processor registration: duplicate registration raises;
        registered classes resolve through the auto-API; mappings are cleaned
        up in the finally block."""
        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCAmelCase ):
                AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
                lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
                lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(UpperCAmelCase )
                lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
                self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def A__ ( self : Any ) -> Any:
        """trust_remote_code precedence: default and False pick the locally
        registered class; True fetches the Hub version."""
        class UpperCAmelCase_ ( __A ):
            """Locally registered processor used to test remote-code priority."""
            # Marker the assertions below read to tell local from remote class.
            UpperCamelCase_ = True

        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # If remote code is not set, the default is to use local
            lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            lowercase : Tuple =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            lowercase : Dict =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( box , width : int , height : int ) -> Optional[int]:
    """Normalize an absolute pixel box to the 0-1000 scale used by LayoutLM.

    Args:
        box: ``[x0, y0, x1, y1]`` in absolute pixel coordinates.
        width: image width in pixels.
        height: image height in pixels.

    Returns:
        The box rescaled so each coordinate lies in [0, 1000].

    Fix: the original signature repeated the parameter name ``__A`` three
    times (a SyntaxError); the parameters are positional, so callers are
    unaffected by the rename.
    """
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]
def lowercase_ ( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] ) -> Optional[Any]:
    """Run Tesseract OCR on ``image`` and return words with normalized boxes.

    Args:
        image: input image (converted to PIL internally).
        lang: Tesseract language code, or None for the default.
        tesseract_config: extra flags forwarded to Tesseract.

    Returns:
        ``(words, normalized_boxes)`` — non-empty OCR words and their
        bounding boxes rescaled to the 0-1000 LayoutLM coordinate space.

    Fixes: the original signature repeated the parameter name ``__A`` (a
    SyntaxError) and used annotated tuple-unpack targets
    (``lowercase , lowercase : Tuple = ...``), which Python rejects.
    """
    lowercase_pil = to_pil_image(image )
    image_width, image_height = lowercase_pil.size
    # Word-level OCR output: parallel lists keyed by field name.
    data = pytesseract.image_to_data(lowercase_pil , lang=lang , output_type='''dict''' , config=tesseract_config )
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )

    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
lowercase : Optional[Any] =do_resize
lowercase : List[Any] =size
lowercase : List[str] =resample
lowercase : Dict =do_rescale
lowercase : str =rescale_value
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase : List[Any] =apply_ocr
lowercase : Union[str, Any] =ocr_lang
lowercase : str =tesseract_config
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase : Optional[Any] =(size['''height'''], size['''width'''])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowercase : Tuple =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
lowercase : List[str] =resample if resample is not None else self.resample
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : str =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : int =[]
lowercase : Tuple =[]
for image in images:
lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
if apply_ocr:
lowercase : int =words_batch
lowercase : List[str] =boxes_batch
return data
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds a tiny ConvBERT config plus random inputs and exposes the
    ``prepare_config_and_inputs`` / ``create_and_check_*`` helpers that the
    test class below calls by name.

    NOTE(review): the original block used one duplicated parameter name for
    every constructor argument (a SyntaxError) and bound every value to a
    throwaway local.  Attribute and method names are reconstructed from the
    reads/calls visible elsewhere in this file.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # The remaining constructor arguments are deliberately ignored: the
        # original body hard-codes every hyper-parameter, which is preserved.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        # ConvBERT-specific sizes (embedding size, head ratio, conv kernel,
        # groups) — reconstructed from the 128/2/9/1 constants in the original.
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a ConvBertConfig and random input/label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the base model with dict, list and plain-tensor inputs."""
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the MLM head emits vocab-sized logits per token."""
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the sequence-classification head emits one logit per label."""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the multiple-choice head over num_choices tiled inputs."""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head emits per-token label logits."""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head emits per-token start/end logits."""
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common-test mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Backward-compatible alias for the original (obfuscated) class name.
UpperCAmelCase_ = TFConvBertModelTester
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + ConvBERT-specific TF model tests.

    NOTE(review): the original block inherited from undefined ``__A`` bases and
    bound every intermediate to a throwaway local; bases, attribute names and
    ``test_*`` method names are reconstructed from the imported mixins and the
    unittest discovery convention.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Save each model as a TF SavedModel, reload it, and check that the
        reloaded model still exposes hidden states and attentions with the
        expected counts and shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # ConvBERT halves the number of attention heads via its head ratio.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Check attentions are returned with the right count/shape whether
        requested via inputs or via the config."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


# Backward-compatible alias for the original (obfuscated) class name.
UpperCAmelCase_ = TFConvBertModelTest
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released YituTech/conv-bert-base weights.

    NOTE(review): the original body bound every value to a throwaway local and
    then read `model`/`output` as undefined names; the bindings are restored
    here and the underscore-garbled float literals normalized.
    """

    @slow
    def test_inference(self):
        """Run a forward pass and compare a 3x3 slice to reference values."""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


# Backward-compatible alias for the original (obfuscated) class name.
UpperCAmelCase_ = TFConvBertModelIntegrationTest
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    """Builds a tiny DistilBERT config plus random inputs and exposes the
    ``create_and_check_distilbert_*`` helpers that the test class below calls
    by name.

    NOTE(review): the original ``__init__`` was syntactically broken and bound
    every value to a throwaway local; attribute and method names are
    reconstructed from the reads/calls visible elsewhere in this file.
    """

    def __init__(self, parent):
        self.parent = parent
        # Hyper-parameters are hard-coded, mirroring the original body.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        # DistilBERT has no token type embeddings.
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a DistilBertConfig and random input/label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the base model with dict and list inputs."""
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the MLM head emits vocab-sized logits per token."""
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head emits per-token start/end logits."""
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the sequence-classification head emits one logit per label."""
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the multiple-choice head over num_choices tiled inputs."""
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head emits per-token label logits."""
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common-test mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Backward-compatible alias for the original (obfuscated) class name.
UpperCAmelCase_ = TFDistilBertModelTester
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + DistilBERT-specific TF model tests.

    NOTE(review): the original block inherited from undefined ``__A`` bases and
    bound intermediates to a throwaway local; bases, attribute names and
    ``test_*`` method names are reconstructed from the imported mixins and the
    unittest discovery convention.
    """

    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# Backward-compatible alias for the original (obfuscated) class name.
UpperCAmelCase_ = TFDistilBertModelTest
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released distilbert-base-uncased weights.

    NOTE(review): the original body bound every value to a throwaway local and
    then read `model`/`output` as undefined names; the bindings are restored
    here and the underscore-garbled float literals normalized.
    """

    @slow
    def test_inference_masked_lm(self):
        """Run a forward pass and compare a 3x3 slice to reference values."""
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


# Backward-compatible alias for the original (obfuscated) class name.
UpperCAmelCase_ = TFDistilBertModelIntegrationTest
| 8 | 1 |
'''simple docstring'''
import cmath

import mpmath  # for roots of unity
import numpy as np
class FFT:
    """Multiply two polynomials via the fast Fourier transform.

    Coefficients are given lowest-degree first; the result is stored in
    ``self.product``.  Fixes in this revision: duplicated parameter names (a
    SyntaxError) restored to ``poly_a``/``poly_b``; attribute bindings lost to
    a throwaway local restored; ``np.loga`` corrected to ``np.log2``; the
    private ``__dft``/``__multiply`` helpers the constructor calls are actually
    defined; a zero polynomial no longer empties the coefficient list; and
    ``__str__`` no longer swaps the index/coefficient from ``enumerate``.
    The root of unity now comes from stdlib ``cmath`` instead of ``mpmath``.
    """

    def __init__(self, poly_a=None, poly_b=None) -> None:
        # Copy the inputs; default to the zero polynomial.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove trailing (highest-degree) zero coefficients, but keep at
        # least one so the zero polynomial does not empty the list.
        while self.polyA[-1] == 0 and len(self.polyA) > 1:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0 and len(self.polyB) > 1:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad with zeros up to the next power of two >= len_A + len_B - 1
        # (the degree bound of the product).
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # Primitive c_max_length-th root of unity, e^(2*pi*i / N).
        self.root = complex(cmath.exp(2j * cmath.pi / self.c_max_length))

        # The product, computed eagerly.
        self.product = self.__multiply()

    def A__(self, which):
        """Iterative radix-2 DFT of ``polyA`` (``which == "A"``) or ``polyB``.

        Keeps the original public method name for backward compatibility.
        """
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case: a single coefficient is its own transform.
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs, invert the transform, and return
        the (real, rounded) product coefficients."""
        dft_a = self.A__("A")
        dft_b = self.A__("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case: nothing to invert for a single coefficient.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]

        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack, rounding away floating-point noise.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing zeros, keeping at least one coefficient.
        while inverce_c[-1] == 0 and len(inverce_c) > 1:
            inverce_c.pop()
        return inverce_c

    def __str__(self) -> str:
        """Human-readable rendering of A, B and A*B."""
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Backward-compatible alias for the original (obfuscated) class name.
UpperCAmelCase_ = FFT
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the RemBERT model. Each optional-dependency probe
# registers the corresponding submodule's public names; at runtime the module
# is replaced with a _LazyModule so heavy imports only happen on first access.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Defer all heavy imports until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# A board position, written (y, x) to match the grid's row-major indexing.
TPosition = tuple[int, int]
class Node:
    """A grid cell in the A* search, ordered by total cost f = g + h."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored (y, x) to match grid indexing.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Estimated distance to the goal: Manhattan if HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Sorting the open list puts the cheapest node first.
        return self.f_cost < other.f_cost
class AStar:
    """Classic A* search over the module-level ``grid`` using ``delta`` moves."""

    def __init__(self, start: TPosition, goal: TPosition):
        # start/goal arrive as (y, x); Node takes (pos_x, pos_y, goal_x, goal_y, ...).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand nodes cheapest-first until the target is reached.

        Returns the path as a list of (y, x) positions, or just the start
        position when the open list is exhausted without reaching the goal.
        """
        while self.open_nodes:
            # Open nodes are sorted using Node.__lt__ (total cost f).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Keep whichever of the two equal nodes has the cheaper path.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbours of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back from *node* and return the start-to-node path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Run two A* searches (start->goal and goal->start) toward each other."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Alternate one expansion per direction until the frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each direction aims at the other direction's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # Keep whichever of the two equal nodes has the cheaper path.
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting position
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # NOTE(review): only construction is timed — the original never invokes
    # bidir_astar.search(); confirm whether a search call was intended.
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 8 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
# Installed PyTorch version, parsed once at import time.
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version against a requirement with the given operator.

    Args:
        library_or_version: A library name (looked up via importlib.metadata)
            or an already-parsed `Version`.
        operation: A string operator key present in `STR_OPERATION_TO_FUNC`
            (e.g. ">=", "<").
        requirement_version: The version string to compare against.

    Raises:
        ValueError: If `operation` is not a supported operator key.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed PyTorch version to `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowercase_(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem with the greedy ratio heuristic.

    Items are taken in decreasing value/weight order; the last item that does
    not fit completely is taken fractionally.

    Returns:
        (max_value, fractions) where fractions[i] is the taken share of item i.

    >>> lowercase_([60, 100, 120], [10, 20, 30], 50)
    (240.0, [1, 1, 0.6666666666666666])
    >>> lowercase_([], [], 10)
    (0, [])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Visit items by best value-per-weight first.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the share of the item that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 8 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
# Indeed search URL; the location is appended as the `l=` query value.
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Scrape Indeed for mobile-app-development jobs in *location*.

    Yields (job_title, company_name) pairs parsed from the results page.
    Network and markup errors propagate to the caller.
    """
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 8 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """Builds tiny Bit configs/inputs and runs shape checks for the test classes below."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=None,
        depths=None,
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=None,
        out_indices=None,
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        # List defaults are created per instance to avoid shared-mutable bugs.
        self.hidden_sizes = [8, 16, 32, 64] if hidden_sizes is None else hidden_sizes
        self.depths = [1, 1, 2, 1] if depths is None else depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(self.depths)
        self.out_features = ["stage2", "stage3", "stage4"] if out_features is None else out_features
        self.out_indices = [2, 3, 4] if out_indices is None else out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        """Return a tiny config plus random pixel values (and labels when used)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # The final feature map is downsampled 32x relative to the input.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps (only the last stage is returned)
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for Bit. Bit is a conv net: it has no attentions,
    input embeddings or attention masks, so those shared tests are skipped or
    overridden below."""

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Bit has no common text-config properties to validate.
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    # Norm layers must start as the identity transform.
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test-fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of a pretrained Bit classifier on a real image."""

    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """Shared backbone tests applied to BitBackbone."""

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import table for the M2M100 model; torch-only pieces are registered
# behind an availability probe and resolved through _LazyModule at runtime.
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    # Defer all heavy imports until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE = logging.getLogger()
def lowercase_ ( __A : Path , __A : list ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[Any] ='''\n'''.join(__A )
Path(__A ).open('''w''' ).writelines(__A )
SCREAMING_SNAKE_CASE = 'patrickvonplaten/t5-tiny-random'
SCREAMING_SNAKE_CASE = 'sshleifer/bart-tiny-random'
SCREAMING_SNAKE_CASE = 'sshleifer/tiny-mbart'
SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    """End-to-end tests for run_eval.py / run_eval_search.py on tiny models."""

    def run_eval_tester(self, model):
        """Run `run_generate` end-to-end for *model* and check a score file appears."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(score_path).exists()
            # os.remove(Path(output_file_name))

    # test one model quickly (non-@slow) to catch simple problems; the
    # extensive multi-model coverage runs as @slow below
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into this list - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        """Run the hyper-parameter grid search and validate its stdout report."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                # a translation task reports the bleu metric
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import table for the Mega model; torch-only pieces are registered
# behind an availability probe and resolved through _LazyModule at runtime.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # Defer all heavy imports until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
SCREAMING_SNAKE_CASE = sys.version_info >= (3, 10)
# Alias under the name the rest of this module actually references
# (several blocks below test `is_python_no_less_than_3_10`, which was
# otherwise undefined and raised NameError).
is_python_no_less_than_3_10 = SCREAMING_SNAKE_CASE
def lowercase_(default=None, metadata=None):
    """Return a dataclasses field whose default is a (possibly mutable) value.

    Wrapping ``default`` in a ``default_factory`` lambda sidesteps the
    dataclasses restriction on mutable default values.

    The original signature declared the same parameter name twice (a
    SyntaxError) while the body referenced ``default``; names restored.
    """
    return field(default_factory=lambda: default, metadata=metadata)


# The dataclasses below call this helper under the name ``list_field``.
list_field = lowercase_
@dataclass
class UpperCAmelCase_ :
    """Basic example dataclass with four required fields.

    NOTE(review): an automated rename collapsed the original distinct field
    names/annotations onto one attribute; only the last binding survives.
    """
    UpperCamelCase_ = 42
    UpperCamelCase_ = 42
    UpperCamelCase_ = 42
    UpperCamelCase_ = 42
@dataclass
class UpperCAmelCase_ :
    """Example with defaults: a required field plus a string field defaulting to 'toto'."""
    UpperCamelCase_ = 42
    UpperCamelCase_ = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class UpperCAmelCase_ :
    """Example with boolean attributes: a False default, a True default and an optional one."""
    UpperCamelCase_ = False
    UpperCamelCase_ = True
    UpperCamelCase_ = None
class UpperCAmelCase_ ( __A ):
    """Two-member enum-style class ('titi' / 'toto'); base class is defined elsewhere."""
    UpperCamelCase_ = '''titi'''
    UpperCamelCase_ = '''toto'''
class UpperCAmelCase_ ( __A ):
    """Mixed-type enum-style class: two string members plus the int 42."""
    UpperCamelCase_ = '''titi'''
    UpperCamelCase_ = '''toto'''
    UpperCamelCase_ = 42
@dataclass
class UpperCAmelCase_ :
    """Dataclass whose string value is coerced into a BasicEnum after construction."""
    UpperCamelCase_ = "toto"
    def A__ ( self : int ) -> List[Any]:
        '''Coerce the attribute into a BasicEnum. NOTE(review): likely a renamed `__post_init__` — confirm.'''
        lowercase : List[str] =BasicEnum(self.foo )
@dataclass
class UpperCAmelCase_ :
    """Dataclass whose value is coerced into a MixedTypeEnum after construction."""
    UpperCamelCase_ = "toto"
    def A__ ( self : List[Any] ) -> Tuple:
        '''Coerce the attribute into a MixedTypeEnum. NOTE(review): likely a renamed `__post_init__` — confirm.'''
        lowercase : Optional[Any] =MixedTypeEnum(self.foo )
@dataclass
class UpperCAmelCase_ :
    """Example with optional scalar fields and two optional list fields (empty defaults)."""
    UpperCamelCase_ = None
    UpperCamelCase_ = field(default=__A , metadata={'''help''': '''help message'''} )
    UpperCamelCase_ = None
    UpperCamelCase_ = list_field(default=[] )
    UpperCamelCase_ = list_field(default=[] )
@dataclass
class UpperCAmelCase_ :
    """Example whose fields are all lists (int, str and float, some with non-empty defaults)."""
    UpperCamelCase_ = list_field(default=[] )
    UpperCamelCase_ = list_field(default=[1, 2, 3] )
    UpperCamelCase_ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
    UpperCamelCase_ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCAmelCase_ :
    """Example whose fields are all required (bare `field()`, no defaults)."""
    UpperCamelCase_ = field()
    UpperCamelCase_ = field()
    UpperCamelCase_ = field()
    def A__ ( self : Tuple ) -> Tuple:
        '''Coerce the required enum value into a BasicEnum. NOTE(review): likely a renamed `__post_init__` — confirm.'''
        lowercase : List[Any] =BasicEnum(self.required_enum )
@dataclass
class UpperCAmelCase_ :
    """Example mixing required, optional, defaulted and list fields
    (originally exercised string-literal type annotations)."""
    UpperCamelCase_ = 42
    UpperCamelCase_ = field()
    UpperCamelCase_ = None
    UpperCamelCase_ = field(default='''toto''' , metadata={'''help''': '''help message'''} )
    UpperCamelCase_ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
# PEP 604 (`X | None`) variants of the examples above; only definable on 3.10+.
if is_python_no_less_than_3_10:
    @dataclass
    class UpperCAmelCase_ :
        """3.10+ (PEP 604) variant of the boolean-defaults example."""
        UpperCamelCase_ = False
        UpperCamelCase_ = True
        UpperCamelCase_ = None

    @dataclass
    class UpperCAmelCase_ :
        """3.10+ (PEP 604) variant of the optional-fields example."""
        UpperCamelCase_ = None
        UpperCamelCase_ = field(default=__A , metadata={'''help''': '''help message'''} )
        UpperCamelCase_ = None
        UpperCamelCase_ = list_field(default=[] )
        UpperCamelCase_ = list_field(default=[] )
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for HfArgumentParser: argparse generation from dataclasses and
    parsing from argv, dicts, JSON and YAML files.

    NOTE(review): an automated rename collapsed most local variable and
    parameter names onto `lowercase` / `UpperCAmelCase`; the bodies below
    are preserved verbatim and still reference the original names
    (`a`, `b`, `parser`, `expected`, `args`, ...).
    """
    def A__ ( self : List[Any] , UpperCAmelCase : argparse.ArgumentParser , UpperCAmelCase : argparse.ArgumentParser ) -> Any:
        '''Assert that two ArgumentParsers declare equivalent actions.'''
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            lowercase : Optional[Any] ={k: v for k, v in vars(UpperCAmelCase ).items() if k != '''container'''}
            lowercase : Union[str, Any] ={k: v for k, v in vars(UpperCAmelCase ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , UpperCAmelCase ) and yy.get('''choices''' , UpperCAmelCase ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](UpperCAmelCase ) , yy['''type'''](UpperCAmelCase ) )
                del xx["type"], yy["type"]
            self.assertEqual(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : str ) -> Any:
        '''A parser built from the basic dataclass matches a hand-built argparse parser.'''
        lowercase : List[str] =HfArgumentParser(UpperCAmelCase )
        lowercase : Any =argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCAmelCase , required=UpperCAmelCase )
        expected.add_argument('''--bar''' , type=UpperCAmelCase , required=UpperCAmelCase )
        expected.add_argument('''--baz''' , type=UpperCAmelCase , required=UpperCAmelCase )
        expected.add_argument('''--flag''' , type=UpperCAmelCase , default=UpperCAmelCase , const=UpperCAmelCase , nargs='''?''' )
        self.argparsersEqual(UpperCAmelCase , UpperCAmelCase )
        lowercase : Optional[Any] =['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        ((lowercase) , ) : str =parser.parse_args_into_dataclasses(UpperCAmelCase , look_for_args_file=UpperCAmelCase )
        self.assertFalse(example.flag )
    def A__ ( self : int ) -> List[str]:
        '''Dataclass defaults become argparse defaults (including the help message).'''
        lowercase : Union[str, Any] =HfArgumentParser(UpperCAmelCase )
        lowercase : str =argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=UpperCAmelCase )
        expected.add_argument('''--baz''' , default='''toto''' , type=UpperCAmelCase , help='''help message''' )
        self.argparsersEqual(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Union[str, Any] ) -> Dict:
        '''Boolean fields produce `--flag` / `--no_flag` style arguments and parse string bools.'''
        lowercase : Dict =argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCAmelCase , default=UpperCAmelCase , const=UpperCAmelCase , nargs='''?''' )
        expected.add_argument('''--baz''' , type=UpperCAmelCase , default=UpperCAmelCase , const=UpperCAmelCase , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=UpperCAmelCase , dest='''baz''' )
        expected.add_argument('''--opt''' , type=UpperCAmelCase , default=UpperCAmelCase )
        lowercase : Optional[int] =[WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCAmelCase )
        for dataclass_type in dataclass_types:
            lowercase : Any =HfArgumentParser(UpperCAmelCase )
            self.argparsersEqual(UpperCAmelCase , UpperCAmelCase )
            lowercase : Any =parser.parse_args([] )
            self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) )
            lowercase : Optional[Any] =parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) )
            lowercase : Optional[Any] =parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) )
            lowercase : int =parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) )
            lowercase : Optional[int] =parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) )
    def A__ ( self : List[str] ) -> List[str]:
        '''Enum-typed fields accept each enum value, including mixed str/int choices.'''
        lowercase : List[Any] =HfArgumentParser(UpperCAmelCase )
        lowercase : Optional[Any] =argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(UpperCAmelCase , UpperCAmelCase )
        lowercase : Union[str, Any] =parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        lowercase : Dict =parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        lowercase : Union[str, Any] =parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        lowercase : Union[str, Any] =parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        lowercase : int =parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        lowercase : List[Any] =parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def A__ ( self : Optional[int] ) -> int:
        '''Literal-annotated fields behave like enums with mixed-type choices.'''
        @dataclass
        class UpperCAmelCase_ :
            """Local example dataclass with a 'toto' default (originally Literal-typed)."""
            UpperCamelCase_ = "toto"
        lowercase : int =HfArgumentParser(UpperCAmelCase )
        lowercase : Optional[Any] =argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(UpperCAmelCase , UpperCAmelCase )
        lowercase : str =parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        lowercase : Tuple =parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        lowercase : str =parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
    def A__ ( self : Optional[int] ) -> Any:
        '''List fields map to nargs="+" arguments with list defaults.'''
        lowercase : str =HfArgumentParser(UpperCAmelCase )
        lowercase : List[str] =argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=UpperCAmelCase )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=UpperCAmelCase )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCAmelCase )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase )
        self.argparsersEqual(UpperCAmelCase , UpperCAmelCase )
        lowercase : Optional[Any] =parser.parse_args([] )
        self.assertEqual(
            UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        lowercase : List[str] =parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def A__ ( self : str ) -> Tuple:
        '''Optional fields default to None and are parsed when provided.'''
        lowercase : List[Any] =argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=UpperCAmelCase , type=UpperCAmelCase )
        expected.add_argument('''--bar''' , default=UpperCAmelCase , type=UpperCAmelCase , help='''help message''' )
        expected.add_argument('''--baz''' , default=UpperCAmelCase , type=UpperCAmelCase )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=UpperCAmelCase )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=UpperCAmelCase )
        lowercase : int =[OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCAmelCase )
        for dataclass_type in dataclass_types:
            lowercase : Union[str, Any] =HfArgumentParser(UpperCAmelCase )
            self.argparsersEqual(UpperCAmelCase , UpperCAmelCase )
            lowercase : Any =parser.parse_args([] )
            self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , bar=UpperCAmelCase , baz=UpperCAmelCase , ces=[] , des=[] ) )
            lowercase : Tuple =parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(UpperCAmelCase , Namespace(foo=12 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def A__ ( self : Union[str, Any] ) -> Dict:
        '''Fields without defaults become required arguments.'''
        lowercase : str =HfArgumentParser(UpperCAmelCase )
        lowercase : Dict =argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=UpperCAmelCase , required=UpperCAmelCase )
        expected.add_argument('''--required_str''' , type=UpperCAmelCase , required=UpperCAmelCase )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCAmelCase , )
        self.argparsersEqual(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Union[str, Any] ) -> int:
        '''String (forward-reference) annotations are resolved like real types.'''
        lowercase : Optional[int] =HfArgumentParser(UpperCAmelCase )
        lowercase : Union[str, Any] =argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCAmelCase , required=UpperCAmelCase )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCAmelCase , )
        expected.add_argument('''--opt''' , type=UpperCAmelCase , default=UpperCAmelCase )
        expected.add_argument('''--baz''' , default='''toto''' , type=UpperCAmelCase , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCAmelCase )
        self.argparsersEqual(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Tuple ) -> Tuple:
        '''parse_dict fills a dataclass from a plain dict.'''
        lowercase : int =HfArgumentParser(UpperCAmelCase )
        lowercase : List[Any] ={
            '''foo''': 12,
            '''bar''': 3.1_4,
            '''baz''': '''42''',
            '''flag''': True,
        }
        lowercase : Tuple =parser.parse_dict(UpperCAmelCase )[0]
        lowercase : Optional[Any] =BasicExample(**UpperCAmelCase )
        self.assertEqual(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Optional[Any] ) -> List[Any]:
        '''parse_dict raises on unknown keys when allow_extra_keys is disabled.'''
        lowercase : Optional[int] =HfArgumentParser(UpperCAmelCase )
        lowercase : Tuple ={
            '''foo''': 12,
            '''bar''': 3.1_4,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(UpperCAmelCase , parser.parse_dict , UpperCAmelCase , allow_extra_keys=UpperCAmelCase )
    def A__ ( self : Dict ) -> Optional[Any]:
        '''A dataclass can be loaded from a JSON file on disk.'''
        lowercase : Optional[Any] =HfArgumentParser(UpperCAmelCase )
        lowercase : Tuple ={
            '''foo''': 12,
            '''bar''': 3.1_4,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase : int =os.path.join(UpperCAmelCase , '''temp_json''' )
            os.mkdir(UpperCAmelCase )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(UpperCAmelCase , UpperCAmelCase )
            lowercase : Union[str, Any] =parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
            lowercase : str =BasicExample(**UpperCAmelCase )
        self.assertEqual(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : List[str] ) -> Optional[int]:
        '''A dataclass can be loaded from a YAML file on disk.'''
        lowercase : str =HfArgumentParser(UpperCAmelCase )
        lowercase : Dict ={
            '''foo''': 12,
            '''bar''': 3.1_4,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase : Union[str, Any] =os.path.join(UpperCAmelCase , '''temp_yaml''' )
            os.mkdir(UpperCAmelCase )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(UpperCAmelCase , UpperCAmelCase )
            lowercase : Union[str, Any] =parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
            lowercase : str =BasicExample(**UpperCAmelCase )
        self.assertEqual(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Tuple ) -> List[str]:
        '''HfArgumentParser can be constructed for TrainingArguments.'''
        lowercase : Optional[int] =HfArgumentParser(UpperCAmelCase )
        self.assertIsNotNone(UpperCAmelCase )
| 8 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# Constants restored under the names the tokenizer class below references
# (the original rebound a single throwaway name five times, leaving
# VOCAB_FILES_NAMES etc. undefined and the logger lost).
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}

PRETRAINED_INIT_CONFIGURATION = {}

# Preserve the final value the original code left bound to this name.
SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
class UpperCAmelCase_ ( __A ):
    """Fast (Rust-backed) HerBERT tokenizer.

    Builds model inputs as ``<s> A </s>`` (single sequence) or
    ``<s> A </s> B </s>`` (pair). Parameter, attribute and method names were
    restored: the original reused one identifier for every parameter (a
    SyntaxError) and defined every method under the same name ``A__`` (so
    only the last definition would survive).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        # Forward everything to the fast-tokenizer base class.
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with the `<s>` / `</s>` special tokens."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Serialize the underlying tokenizer model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 8 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# NOTE(review): the next assignment rebinds the same name, so the logger
# above is immediately lost; presumably these were two distinct constants
# (a module logger and the pretrained-config archive map) before an
# automated rename — confirm against the original module.
SCREAMING_SNAKE_CASE = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase_ ( __A ):
    """Configuration for the TrOCR causal text decoder.

    Defaults correspond to ``microsoft/trocr-base-handwritten``. Parameter
    and class-attribute names were restored: the original reused one
    identifier for every ``__init__`` parameter, which is a SyntaxError.
    """

    model_type = '''trocr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    # Generic attribute names -> decoder-specific config fields.
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 8 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
    """Tests that models and pipelines load from the local cache when
    TRANSFORMERS_OFFLINE=1 or the network is unavailable.

    Each test assembles a small Python program as strings (`load` / `run` /
    `mock`, where `mock` replaces `socket.socket` with a raising stub) and
    executes it in a subprocess with a controlled environment.

    NOTE(review): an automated rename collapsed the local variable names
    onto `lowercase`; later statements still reference the original names
    (`load`, `run`, `mock`, `cmd`, `env`, `result`).
    """
    @require_torch
    def A__ ( self : Tuple ) -> Optional[int]:
        '''Offline mode (TRANSFORMERS_OFFLINE=1, socket raising) works once the cache is warm.'''
        lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        '''
        lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        '''
        lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
        '''
        # Force fetching the files so that we can use the cache
        lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(UpperCAmelCase )
        BertModel.from_pretrained(UpperCAmelCase )
        BertTokenizer.from_pretrained(UpperCAmelCase )
        pipeline(task='''fill-mask''' , model=UpperCAmelCase )
        # baseline - just load from_pretrained with normal network
        lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        lowercase : Tuple =self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : Optional[Any] ='''1'''
        lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
    @require_torch
    def A__ ( self : str ) -> List[str]:
        '''With a flaky/raising socket, cached files are still used (no offline flag set).'''
        lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        '''
        lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        '''
        lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        '''
        # Force fetching the files so that we can use the cache
        lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(UpperCAmelCase )
        BertModel.from_pretrained(UpperCAmelCase )
        BertTokenizer.from_pretrained(UpperCAmelCase )
        pipeline(task='''fill-mask''' , model=UpperCAmelCase )
        # baseline - just load from_pretrained with normal network
        lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        lowercase : str =self.get_env()
        lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
    @require_torch
    def A__ ( self : Any ) -> Optional[Any]:
        '''Sharded checkpoints also load from the cache in offline mode.'''
        lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
        '''
        lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        '''
        lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        '''
        # baseline - just load from_pretrained with normal network
        lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        lowercase : Optional[Any] =self.get_env()
        lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # next emulate no network
        lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : Any ='''1'''
        lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
    @require_torch
    def A__ ( self : Tuple ) -> Optional[int]:
        '''pipeline() without an explicit task fails with a clear error in offline mode.'''
        lowercase : Optional[int] ='''
from transformers import pipeline
        '''
        lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        '''
        lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        '''
        lowercase : Tuple =self.get_env()
        lowercase : Optional[int] ='''1'''
        lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
    @require_torch
    def A__ ( self : int ) -> Optional[int]:
        '''trust_remote_code models load from the cache with TRANSFORMERS_OFFLINE=1.'''
        lowercase : List[str] ='''
from transformers import AutoModel
        '''
        lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        '''
        # baseline - just load from_pretrained with normal network
        lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        lowercase : Optional[Any] =self.get_env()
        lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : List[str] ='''1'''
        lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
| 8 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( __A ):
    """Combines an auto image processor and an auto tokenizer into one processor.

    Parameter and method names were restored: the original reused one
    identifier for every parameter (a SyntaxError) and defined all three
    public methods under the same name ``A__`` (so only the last survived).
    """

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # Default processor used when this object is called directly.
        # NOTE(review): the original assigned self.image_processor to a
        # discarded local; restoring the conventional attribute — confirm.
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns the tokenizer encoding (with `pixel_values` merged in when
        both inputs are given) or a BatchEncoding of image features.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Bug fix: the original computed pixel_values but discarded them
            # into a local instead of attaching them to the encoding.
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Names of the inputs the wrapped model expects."""
        return ["input_ids", "attention_mask", "pixel_values"]
| 8 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully-parenthesised infix expression with two stacks.

    Dijkstra's two-stack algorithm: operands go on one stack and operators
    on the other; every ``)`` pops one operator and two operands, applies
    the operator, and pushes the result back.

    Fixes vs. the previous version: the function name now matches its call
    site below (it was renamed to an unused identifier), and the two popped
    operands are kept in distinct variables (both were previously stored in
    one variable, so the operator was applied to the same operand twice).
    """
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands are pushed onto the operand stack.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators are pushed onto the operator stack.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis reduces one operator and two operands.
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5: the value left on the operand stack is the result.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 | 1 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully-parenthesised infix expression with two stacks.

    Dijkstra's two-stack algorithm: operands go on one stack and operators
    on the other; every ``)`` pops one operator and two operands, applies
    the operator, and pushes the result back.

    Fixes vs. the previous version: the function name now matches its call
    site below (it was renamed to an unused identifier), and the two popped
    operands are kept in distinct variables (both were previously stored in
    one variable, so the operator was applied to the same operand twice).
    """
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands are pushed onto the operand stack.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators are pushed onto the operator stack.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis reduces one operator and two operands.
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5: the value left on the operand stack is the result.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if *phone* is a valid Indian mobile number.

    Accepts an optional ``+91`` prefix (with ``-``/space separator) or a
    ``0``/``91`` prefix, followed by a 10-digit number starting with 7, 8
    or 9. (The function was previously renamed to an unused identifier,
    so the ``__main__`` call below raised NameError.)
    """
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''')
    if match := re.search(pat, phone):
        # The pattern is anchored with ^...$, so a match spans the whole string.
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator('+918827897895'))
| 8 | 1 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k±1 trial division.

    (Renamed from an unused placeholder identifier: the sibling function
    below calls ``is_prime``, which was otherwise undefined.)
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """Project Euler 58: side length of the square spiral at which the
    ratio of primes along both diagonals first falls below *ratio*.

    Fixes vs. the previous version: this function shared a name with
    ``is_prime`` above (shadowing it), referenced the undefined name
    ``ratio`` for its parameter, and counted ``is_prime(ratio)`` instead
    of testing each diagonal corner ``i``.
    """
    j = 3
    primes = 3
    # 2*j - 1 diagonal values exist for a spiral of side length j.
    while primes / (2 * j - 1) >= ratio:
        # Corners of the next ring: j*j + (j+1), ..., up to (j+2)^2 exclusive.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
    """Test helper that builds a tiny RoFormer configuration plus random
    input tensors and shape-checks every TF RoFormer model head.

    NOTE(review): identifiers in this class are machine-mangled — every
    ``__init__`` parameter shares the name ``UpperCAmelCase`` (duplicate
    argument names are a SyntaxError in Python), model outputs are bound
    to ``lowercase`` and later read under their original names, so this
    module cannot run as written; the names need to be restored.
    """
    def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
        '''Store fixed hyper-parameters for the tiny test model; the body
        assigns constants regardless of the incoming arguments.'''
        lowercase : Optional[Any] =parent
        lowercase : Tuple =13
        lowercase : Any =7
        lowercase : Union[str, Any] =True
        lowercase : Any =True
        lowercase : Optional[int] =True
        lowercase : List[str] =True
        lowercase : Tuple =99
        lowercase : str =32
        lowercase : Union[str, Any] =2
        lowercase : Dict =4
        lowercase : Union[str, Any] =37
        lowercase : Union[str, Any] ='''gelu'''
        lowercase : Any =0.1
        lowercase : Dict =0.1
        lowercase : Dict =512
        lowercase : List[str] =16
        lowercase : Dict =2
        lowercase : int =0.0_2
        lowercase : List[Any] =3
        lowercase : List[str] =4
        lowercase : Optional[Any] =None
    def A__ ( self : Union[str, Any] ) -> int:
        '''Create a RoFormerConfig plus random input ids, masks, token
        type ids and labels for the tiny test model.'''
        lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : str =None
        if self.use_input_mask:
            lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : Any =None
        if self.use_token_type_ids:
            lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase : List[Any] =None
        lowercase : List[str] =None
        lowercase : List[str] =None
        if self.use_labels:
            lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
        lowercase : List[Any] =RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
        '''Run TFRoFormerModel on dict and list inputs and check the
        last_hidden_state shape.'''
        lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
        lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowercase : Tuple =[input_ids, input_mask]
        lowercase : str =model(UpperCAmelCase )
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
        '''Run TFRoFormerForCausalLM and check the logits shape.'''
        lowercase : Dict =True
        lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
        lowercase : Union[str, Any] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
        '''Run TFRoFormerForMaskedLM and check the logits shape.'''
        lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
        lowercase : List[str] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
        '''Run TFRoFormerForSequenceClassification and check the logits shape.'''
        lowercase : Optional[Any] =self.num_labels
        lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
        lowercase : Optional[int] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Optional[Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
        '''Run TFRoFormerForMultipleChoice on tiled inputs and check the
        logits shape.'''
        lowercase : int =self.num_choices
        lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
        lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] ={
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
        '''Run TFRoFormerForTokenClassification and check the logits shape.'''
        lowercase : List[Any] =self.num_labels
        lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
        lowercase : Tuple ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
        '''Run TFRoFormerForQuestionAnswering and check the start/end
        logits shapes.'''
        lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
        lowercase : List[str] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def A__ ( self : List[Any] ) -> Dict:
        '''Repack the prepared config and inputs into the dict format used
        by the common model tests.'''
        lowercase : Optional[Any] =self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) : Optional[int] =config_and_inputs
        lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
    """Common test suite wiring for the TF RoFormer models.

    NOTE(review): this class is machine-mangled — the base classes are the
    undefined name ``__A`` (presumably the model/pipeline tester mixins),
    the class attributes all share the name ``UpperCamelCase_`` so later
    assignments overwrite earlier ones, and the bodies read names
    (``pipeline_test_casse_name``, ``TFRoFormerModelTester``) that are not
    defined in this file — confirm against the upstream source.
    """
    UpperCamelCase_ = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    UpperCamelCase_ = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
        '''Skip the text-generation pipeline tests for this model.'''
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def A__ ( self : str ) -> Optional[int]:
        '''Set up the model tester and the config tester.'''
        lowercase : List[Any] =TFRoFormerModelTester(self )
        lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
    def A__ ( self : Optional[Any] ) -> Union[str, Any]:
        '''Run the common configuration tests.'''
        self.config_tester.run_common_tests()
    def A__ ( self : List[str] ) -> List[str]:
        '''Shape-check the base model.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )
    def A__ ( self : List[Any] ) -> Optional[Any]:
        '''Shape-check the masked-LM head.'''
        lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
    def A__ ( self : str ) -> Optional[int]:
        '''Shape-check the causal-LM head.'''
        lowercase : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
    def A__ ( self : int ) -> Tuple:
        '''Shape-check the multiple-choice head.'''
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
    def A__ ( self : Dict ) -> List[str]:
        '''Shape-check the question-answering head.'''
        lowercase : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
    def A__ ( self : Dict ) -> Any:
        '''Shape-check the sequence-classification head.'''
        lowercase : int =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
    def A__ ( self : List[Any] ) -> Tuple:
        '''Shape-check the token-classification head.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
    @slow
    def A__ ( self : str ) -> str:
        '''Smoke-test loading the pretrained Chinese RoFormer checkpoint.'''
        lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
        self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test for the pretrained TF RoFormer masked-LM head.

    NOTE(review): identifiers are machine-mangled — the model/output are
    bound to ``lowercase`` and then read as ``model``/``output``/
    ``vocab_size``, which are undefined here; confirm against upstream.
    """
    @slow
    def A__ ( self : Union[str, Any] ) -> Dict:
        '''Compare masked-LM logits on a fixed 6-token input against a
        hard-coded expected slice.'''
        lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowercase : List[str] =model(UpperCAmelCase )[0]
        # TODO Replace vocab size
        lowercase : Tuple =5_0000
        lowercase : List[str] =[1, 6, vocab_size]
        self.assertEqual(output.shape , UpperCAmelCase )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        lowercase : Dict =tf.constant(
            [
                [
                    [-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
                    [-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
                    [-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for TFRoFormerSinusoidalPositionalEmbedding against
    hard-coded expected values.

    NOTE(review): identifiers are machine-mangled — objects are bound to
    ``lowercase`` and then read as ``emba``/``input_ids``, which are
    undefined here; confirm against upstream.
    """
    UpperCamelCase_ = 1e-4
    def A__ ( self : int ) -> List[Any]:
        '''Check the first two rows of a small (6 x 6) sinusoidal table.'''
        lowercase : Union[str, Any] =tf.constant([[4, 10]] )
        lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        lowercase : Any =emba(input_ids.shape )
        lowercase : List[str] =tf.constant(
            [[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
        tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
    def A__ ( self : Optional[Any] ) -> int:
        '''Check the top-left corner of a large (512 x 512) sinusoidal table.'''
        lowercase : Optional[Any] =tf.constant(
            [
                [0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
                [0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
                [0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
            ] )
        lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        lowercase : str =emba.weight[:3, :5]
        tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Test for TFRoFormerSelfAttention.apply_rotary_position_embeddings
    against hard-coded expected query/key slices.

    NOTE(review): identifiers are machine-mangled — tensors are bound to
    ``lowercase`` and then read as ``embed_positions``/``query_layer``/
    ``key_layer``, which are undefined here; confirm against upstream.
    """
    UpperCamelCase_ = 1e-4
    def A__ ( self : Dict ) -> Dict:
        '''Apply rotary position embeddings to synthetic query/key tensors
        and compare fixed slices with expected constants.'''
        # 2.0 is the default value of rotary_value in the model config.
        lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
        lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        lowercase : Any =tf.constant(
            [
                [0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
                [-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
                [-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
                [-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
                [0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
                [3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
            ] )
        lowercase : int =tf.constant(
            [
                [0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
                [0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
                [1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
                [2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
                [-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
                [-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
| 8 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase ):
    """Fast (tiny-model) tests for StableDiffusionUpscalePipeline.

    NOTE(review): identifiers are machine-mangled — the three @property
    accessors all share the name ``A__`` (later definitions overwrite
    earlier ones), results are bound to ``lowercase`` and read back under
    their original names (``batch_size``, ``unet``, ``low_res_image``,
    ``sd_pipe`` ...), and the tests read properties such as
    ``self.dummy_cond_unet_upscale`` that are never defined under that
    name here; confirm against the upstream diffusers test file.
    """
    def A__ ( self : Optional[int] ) -> Dict:
        '''Free GPU memory and collect garbage after each test.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def A__ ( self : Dict ) -> Any:
        '''Return a small random image tensor on the test device.'''
        lowercase : str =1
        lowercase : Any =3
        lowercase : Dict =(32, 32)
        lowercase : str =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
        return image
    @property
    def A__ ( self : Tuple ) -> Union[str, Any]:
        '''Return a tiny conditional UNet suitable for upscaling tests.'''
        torch.manual_seed(0 )
        lowercase : Any =UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model
    @property
    def A__ ( self : List[str] ) -> int:
        '''Return a tiny VAE.'''
        torch.manual_seed(0 )
        lowercase : Dict =AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model
    @property
    def A__ ( self : Union[str, Any] ) -> Optional[Any]:
        '''Return a tiny CLIP text encoder.'''
        torch.manual_seed(0 )
        lowercase : List[Any] =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
        return CLIPTextModel(UpperCAmelCase )
    def A__ ( self : Tuple ) -> Optional[int]:
        '''Run the pipeline end-to-end on CPU and compare the output image
        slice (and the tuple-return variant) against expected values.'''
        lowercase : Optional[Any] ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowercase : List[str] =self.dummy_cond_unet_upscale
        lowercase : Optional[int] =DDPMScheduler()
        lowercase : Tuple =DDIMScheduler(prediction_type='''v_prediction''' )
        lowercase : Optional[int] =self.dummy_vae
        lowercase : Any =self.dummy_text_encoder
        lowercase : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        lowercase : Any =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase : int =Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        lowercase : List[str] =StableDiffusionUpscalePipeline(
            unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , max_noise_level=350 , )
        lowercase : List[Any] =sd_pipe.to(UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
        lowercase : Optional[Any] ='''A painting of a squirrel eating a burger'''
        lowercase : str =torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
        lowercase : Dict =sd_pipe(
            [prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
        lowercase : int =output.images
        lowercase : Union[str, Any] =torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
        lowercase : Union[str, Any] =sd_pipe(
            [prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCAmelCase , )[0]
        lowercase : int =image[0, -3:, -3:, -1]
        lowercase : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
        lowercase : Union[str, Any] =low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        lowercase : Optional[int] =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def A__ ( self : Optional[int] ) -> int:
        '''Check batched prompts and num_images_per_prompt both yield two
        output images.'''
        lowercase : List[Any] ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowercase : List[str] =self.dummy_cond_unet_upscale
        lowercase : Optional[Any] =DDPMScheduler()
        lowercase : int =DDIMScheduler(prediction_type='''v_prediction''' )
        lowercase : int =self.dummy_vae
        lowercase : Optional[int] =self.dummy_text_encoder
        lowercase : List[Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        lowercase : str =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase : Dict =Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        lowercase : Union[str, Any] =StableDiffusionUpscalePipeline(
            unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , max_noise_level=350 , )
        lowercase : str =sd_pipe.to(UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
        lowercase : Optional[Any] ='''A painting of a squirrel eating a burger'''
        lowercase : List[str] =sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
        lowercase : Optional[Any] =output.images
        assert image.shape[0] == 2
        lowercase : List[str] =torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
        lowercase : Tuple =sd_pipe(
            [prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
        lowercase : List[Any] =output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def A__ ( self : Any ) -> Dict:
        '''Run the pipeline with unet/text-encoder in fp16 (vae kept fp32)
        and check the output shape.'''
        lowercase : List[Any] =self.dummy_cond_unet_upscale
        lowercase : List[str] =DDPMScheduler()
        lowercase : Union[str, Any] =DDIMScheduler(prediction_type='''v_prediction''' )
        lowercase : List[str] =self.dummy_vae
        lowercase : Optional[Any] =self.dummy_text_encoder
        lowercase : Any =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        lowercase : List[Any] =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase : Optional[int] =Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        lowercase : Union[str, Any] =unet.half()
        lowercase : Union[str, Any] =text_encoder.half()
        # make sure here that pndm scheduler skips prk
        lowercase : Dict =StableDiffusionUpscalePipeline(
            unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , max_noise_level=350 , )
        lowercase : int =sd_pipe.to(UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
        lowercase : List[Any] ='''A painting of a squirrel eating a burger'''
        lowercase : List[str] =torch.manual_seed(0 )
        lowercase : Union[str, Any] =sd_pipe(
            [prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , ).images
        lowercase : List[str] =low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests for the pretrained
    stabilityai/stable-diffusion-x4-upscaler checkpoint.

    NOTE(review): identifiers are machine-mangled — results are bound to
    ``lowercase`` and then read as ``pipe``/``output``/``mem_bytes`` etc.,
    which are undefined here; confirm against the upstream test file.
    """
    def A__ ( self : Optional[Any] ) -> Optional[Any]:
        '''Free GPU memory and collect garbage after each test.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def A__ ( self : Any ) -> int:
        '''Upscale a reference cat image in fp32 and compare against a
        stored expected output.'''
        lowercase : Optional[Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        lowercase : List[str] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
            '''/upsampled_cat.npy''' )
        lowercase : List[str] ='''stabilityai/stable-diffusion-x4-upscaler'''
        lowercase : Tuple =StableDiffusionUpscalePipeline.from_pretrained(UpperCAmelCase )
        pipe.to(UpperCAmelCase )
        pipe.set_progress_bar_config(disable=UpperCAmelCase )
        pipe.enable_attention_slicing()
        lowercase : List[str] ='''a cat sitting on a park bench'''
        lowercase : Any =torch.manual_seed(0 )
        lowercase : Dict =pipe(
            prompt=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , output_type='''np''' , )
        lowercase : List[Any] =output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-3
    def A__ ( self : Optional[Any] ) -> Optional[int]:
        '''Same comparison in fp16 with a looser tolerance.'''
        lowercase : Union[str, Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        lowercase : Any =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
            '''/upsampled_cat_fp16.npy''' )
        lowercase : Optional[Any] ='''stabilityai/stable-diffusion-x4-upscaler'''
        lowercase : Union[str, Any] =StableDiffusionUpscalePipeline.from_pretrained(
            UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(UpperCAmelCase )
        pipe.set_progress_bar_config(disable=UpperCAmelCase )
        pipe.enable_attention_slicing()
        lowercase : Union[str, Any] ='''a cat sitting on a park bench'''
        lowercase : Any =torch.manual_seed(0 )
        lowercase : Optional[Any] =pipe(
            prompt=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , output_type='''np''' , )
        lowercase : Any =output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1
    def A__ ( self : Tuple ) -> Tuple:
        '''Check peak GPU memory stays under ~2.9 GB with attention
        slicing and sequential CPU offload enabled.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowercase : List[Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        lowercase : Optional[int] ='''stabilityai/stable-diffusion-x4-upscaler'''
        lowercase : Union[str, Any] =StableDiffusionUpscalePipeline.from_pretrained(
            UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(UpperCAmelCase )
        pipe.set_progress_bar_config(disable=UpperCAmelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        lowercase : Any ='''a cat sitting on a park bench'''
        lowercase : Union[str, Any] =torch.manual_seed(0 )
        lowercase : Tuple =pipe(
            prompt=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=5 , output_type='''np''' , )
        lowercase : Any =torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 8 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ = '''LayoutLMv2ImageProcessor'''
UpperCamelCase_ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self : List[str] , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , **UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase , )
lowercase : Any =kwargs.pop('''feature_extractor''' )
lowercase : Dict =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Any , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowercase : Tuple =self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Optional[Any] =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase : List[str] =features['''words''']
lowercase : Optional[Any] =self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel values
lowercase : List[str] =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowercase : str =self.get_overflowing_images(UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
lowercase : Dict =images
return encoded_inputs
def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
'''simple docstring'''
lowercase : str =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}' )
return images_with_overflow
def A__ ( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A__(self):
    """Names of the model inputs this processor produces."""
    names = ["input_ids", "bbox", "attention_mask", "image"]
    return names
@property
def A__(self):
    """Deprecated alias of ``image_processor_class`` (kept for back-compat)."""
    # Fix: the warning category was an undefined name; deprecations use FutureWarning.
    warnings.warn(
        '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
        FutureWarning,
    )
    return self.image_processor_class
@property
def A__(self):
    """Deprecated alias of ``image_processor`` (kept for back-compat)."""
    # Fix: the warning category was an undefined name; deprecations use FutureWarning.
    warnings.warn(
        '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
        FutureWarning,
    )
    return self.image_processor
| 8 | 1 |
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """Five-round Miller-Rabin probabilistic primality test.

    Assumes ``num`` is odd and > 3 — callers screen out small and even
    candidates first (an even ``num`` would make t == 0 and loop forever).
    """
    # write num - 1 as s * 2**t with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            # square repeatedly; a prime must reach num - 1 before t - 1 squarings
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Return True if ``num`` is (very probably) prime.

    Fast path: membership / trial division against every prime below 1000
    (the same table the original hard-coded, here built with a sieve);
    survivors larger than that are handed to ``rabin_miller``.
    """
    if num < 2:
        return False
    # Sieve of Eratosthenes for all primes < 1000 (2 ... 997).
    limit = 1000
    sieve = [True] * limit
    sieve[0] = sieve[1] = False
    for p in range(2, int(limit**0.5) + 1):
        if sieve[p]:
            for multiple in range(p * p, limit, p):
                sieve[multiple] = False
    low_primes = [p for p in range(limit) if sieve[p]]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime with ``keysize`` bits.

    Fix: the candidate is now bound and tested (the original tested the
    keysize itself and returned an unbound name).
    """
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    # demo: generate one large prime and re-verify it
    num = generate_large_prime()
    print(('Prime number:', num))
    print(('is_prime_low_num:', is_prime_low_num(num)))
| 8 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Raises:
        TypeError: if ``n`` is not castable to int.
        ValueError: if ``n`` <= 0.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    largest = 1
    i = 2
    # strip each factor i completely; whatever survives past sqrt(n) is prime
    while i * i <= n:
        while n % i == 0:
            largest = i
            n //= i
        i += 1
    if n > 1:
        largest = n
    # Fix: return the accumulated factor, not the (consumed) input
    return int(largest)


if __name__ == "__main__":
    print(f"{solution() = }")
| 8 | 1 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True when ``phone`` is a valid Indian mobile number.

    Fix: the compiled pattern is now actually used (the original searched the
    phone string within itself).
    """
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$')
    if match := re.search(pat, phone):
        # the pattern is fully anchored, so any match spans the whole input
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator('+918827897895'))
| 8 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the falling factorial u * (u-1) * ... * (u-p+1).

    This is the u-term of the Newton forward-interpolation formula.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Interactive Newton forward-interpolation driver (reads from stdin)."""
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    # pre-fill an n x n difference table with zeros
    # NOTE(review): reconstructed table initialisation — confirm against the
    # intended algorithm; the obfuscated original lost the assignment targets.
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}')


if __name__ == "__main__":
    main()
| 8 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Map of checkpoint id -> hosted config URL for Informer.
# NOTE(review): this rebinds the same name as the logger above, clobbering it —
# presumably the intended names were `logger` and
# `INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm before relying on either.
SCREAMING_SNAKE_CASE = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration for the Informer time-series transformer.

    Stores the time-series-specific settings (prediction/context lengths,
    lags, static/dynamic feature counts) plus the usual encoder/decoder
    transformer hyper-parameters and the Informer-specific ProbSparse
    attention options.
    """

    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output='student_t',
        loss='nll',
        input_size=1,
        lags_sequence=None,
        scaling='mean',
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function='gelu',
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type='prob',
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # time-series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer-specific: ProbSparse attention settings
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Width of the per-timestep feature vector fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 8 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    """AutoImageProcessor resolution from hub ids, local files, and custom registries.

    Fix: the obfuscated original defined every method under one name (each
    silently shadowing the previous) and referenced undefined placeholders;
    methods are restored as distinct ``test_*`` cases.
    """

    def setUp(self):
        # fail immediately instead of waiting at the remote-code prompt
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # legacy configs that only carry `feature_extractor_type` must still resolve
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop('image_processor_type')
            config = CLIPImageProcessor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            image_processor = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'clip-base is not a local folder and is not a valid model identifier'
        ):
            _ = AutoImageProcessor.from_pretrained('clip-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.',
        ):
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False
            )
        image_processor = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, 'NewImageProcessor')

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
                config_tmpfile = Path(tmpdirname) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'},
                    open(processor_tmpfile, 'w'),
                )
                json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if 'custom' in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content['custom']
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(not hasattr(image_processor, 'is_local'))
        finally:
            if 'custom' in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content['custom']
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# sample inputs for the __main__ demo below (names referenced by the demo)
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """A single sorted-linked-list cell."""

    # payload value
    data: int
    # next cell, or None at the tail
    next_node: Node | None
class SortedLinkedList:
    """Singly linked list that keeps its integers in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert largest first so the finished list reads smallest -> largest
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Return a new sorted list holding every element of both inputs."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # demo: merge the two sample tuples into one sorted list
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 8 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def lowercase_(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four quantities is 0.

    Exactly one of force, charge1, charge2, distance must be passed as 0;
    the returned dict maps that quantity's name to its computed value.

    Raises:
        ValueError: if not exactly one argument is 0, or distance is negative.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 8 | 1 |
'''simple docstring'''
def lowercase_(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    Fix: the original zipped into a single repeated loop variable, so the
    comparison was always false and the count stayed 0.

    Raises:
        ValueError: if the strings differ in length.
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 8 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

# Names restored to the identifiers the tokenizer class below actually reads.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte value 0-255 to a printable unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted into
    the 256+ codepoint range so BPE vocab strings avoid whitespace/control
    characters.
    """
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED (same vocabulary handling as BART/GPT-2).

    Fixes vs the obfuscated original: distinct method/parameter names (every
    method shared one name and several signatures repeated a parameter),
    attributes restored to the names the bodies read, and the sort key lambda
    in ``save_vocabulary`` made consistent (it referenced an undefined name).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors='replace',
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to one pre-tokenized token, with caching."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked (earliest-learned) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and undo the byte-level mapping."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """LED does not use token type ids; return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and 'global_attention_mask' in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == 'right':
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == 'left':
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
| 8 | 1 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = tuple[float, float, float]
SCREAMING_SNAKE_CASE = tuple[float, float, float]
def lowercase_(end_pointa: tuple[float, float, float], end_pointb: tuple[float, float, float]) -> tuple[float, float, float]:
    """Return the 3-D vector pointing from ``end_pointa`` to ``end_pointb``.

    Fixes from the obfuscated original: both parameters were named ``__A``
    (a SyntaxError) and each component computed ``end_pointa[i] - end_pointa[i]``,
    which is always zero.
    """
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def lowercase_(ab: tuple[float, float, float], ac: tuple[float, float, float]) -> tuple[float, float, float]:
    """Return the cross product ``ab x ac`` of two 3-D vectors.

    Fixes from the obfuscated original: both parameters were named ``__A``
    (a SyntaxError) while the body read the intended names ``ab``/``ac``.
    """
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def lowercase_(vector: tuple[float, float, float], accuracy: int) -> bool:
    """Return True when every component of ``vector`` rounds to zero at
    ``accuracy`` decimal places.

    Fixes from the obfuscated original: both parameters were named ``__A``
    (a SyntaxError), the body referenced the undefined name ``vector`` and
    rounded ``__A`` with itself instead of rounding each component.
    """
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def lowercase_ ( __A : Pointad , __A : Pointad , __A : Pointad , __A : int = 1_0 ) -> bool:
    """Return True when three 3-D points are collinear: build the two vectors
    spanned by the points and test whether their cross product rounds to zero.

    NOTE(review): obfuscation artifacts — all four parameters share the name
    ``__A`` (a SyntaxError as written), and the body calls ``create_vector``,
    ``get_ad_vectors_cross`` and ``is_zero_vector``, which are not defined
    under those names in this file (the helpers above are all named
    ``lowercase_``). Confirm the intended helper names upstream.
    """
    lowercase : Optional[int] =create_vector(__A , __A )
    lowercase : int =create_vector(__A , __A )
    return is_zero_vector(get_ad_vectors_cross(__A , __A ) , __A )
| 8 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_:
    """Multiply two polynomials with the iterative radix-2 FFT (Cooley-Tukey).

    Coefficients are given lowest-degree first; after construction the product
    polynomial is available in ``self.product``.

    Fixes from the obfuscated original: duplicated ``__init__`` parameter names
    (a SyntaxError), ``np.loga`` typo for ``np.log2``, private methods all named
    ``A__`` while being invoked as ``self.__dft``/``self.__multiply``, the
    throwaway ``lowercase`` locals restored to real attribute/variable names,
    ``__str__``'s swapped ``enumerate`` unpacking and its undefined ``a/b/c``
    names, and the third-party ``mpmath`` root of unity replaced by the
    equivalent ``numpy`` expression (numpy is already imported by this module).
    """

    def __init__(self, poly_a=None, poly_b=None):
        """Store both coefficient lists, strip trailing zeros, pad to a power
        of two, and compute the product.

        Raises IndexError for an all-zero polynomial (the trailing-zero strip
        empties the list) — behavior preserved from the original.
        """
        # Defensive copies; default to the zero polynomial [0].
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients (highest list index = highest degree).
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2 (>= deg(A*B) + 1).
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex primitive c_max_length-th root of unity, e^(2*pi*i/N),
        # used for the Fourier transform.
        self.root = complex(np.exp(2j * np.pi / self.c_max_length))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Discrete Fourier transform of polynomial 'A' or 'B' (radix-2, iterative)."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case: a constant polynomial is its own DFT.
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs and apply the inverse DFT.

        Returns the product's coefficients rounded to 8 decimal places with
        trailing zeros removed.
        """
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case: single-point product needs no inverse transform.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack and round away FFT floating-point noise.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable form of A, B and their product, one per line."""
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 8 | 1 |
'''simple docstring'''
class UpperCAmelCase_:
    """A directed graph stored as an adjacency mapping ``{vertex: [neighbors]}``
    with a recursive depth-first traversal.

    Fixes from the obfuscated original: all methods were named ``A__`` (each
    definition shadowing the previous one) even though the class and the demo
    script call ``add_edge``/``print_graph``/``dfs``/``dfs_recursive``;
    ``add_edge``'s else-branch assigned the new adjacency list to a throwaway
    local; ``dfs`` recursed with the wrong arguments; the demo instantiated an
    undefined name ``Graph`` and used an undefined variable ``g``.
    """

    def __init__(self):
        # vertex -> list of vertices reachable via one outgoing edge
        self.vertex = {}

    def print_graph(self):
        """Print the raw adjacency dict, then one 'v -> n1 -> n2' line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Add a directed edge from ``from_vertex`` to ``to_vertex``."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Run DFS from every unvisited vertex, printing each vertex once.

        NOTE(review): assumes vertices are labeled 0..n-1 so a list indexed by
        vertex works as the visited set — confirm for non-contiguous labels.
        """
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        """Mark and print ``start_vertex``, then recurse into unvisited vertices."""
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Iterates every vertex key (preserved from the original), which still
        # yields each vertex exactly once — TODO confirm neighbors-only was not intended.
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE = UpperCAmelCase_()
    SCREAMING_SNAKE_CASE.add_edge(0, 1)
    SCREAMING_SNAKE_CASE.add_edge(0, 2)
    SCREAMING_SNAKE_CASE.add_edge(1, 2)
    SCREAMING_SNAKE_CASE.add_edge(2, 0)
    SCREAMING_SNAKE_CASE.add_edge(2, 3)
    SCREAMING_SNAKE_CASE.add_edge(3, 3)
    SCREAMING_SNAKE_CASE.print_graph()
    print("DFS:")
    SCREAMING_SNAKE_CASE.dfs()
    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 8 |
'''simple docstring'''
def lowercase_(number: int, shift_amount: int) -> str:
    """Logical left shift: append ``shift_amount`` zeros to ``number``'s binary
    representation and return it as a '0b...'-prefixed string.

    Fixes from the obfuscated original: both parameters were named ``__A``
    (a SyntaxError) and the binary string was bound to a throwaway local while
    the next line mutated the undefined name ``binary_number``.

    :raises ValueError: if either input is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def lowercase_(number: int, shift_amount: int) -> str:
    """Logical right shift: drop the ``shift_amount`` lowest bits of ``number``
    and return the result as a '0b...'-prefixed string ('0b0' when all bits
    are shifted out).

    Fixes from the obfuscated original: both parameters were named ``__A``
    (a SyntaxError) and intermediate results were bound to throwaway locals
    while later lines read ``binary_number``/``shifted_binary_number``.

    :raises ValueError: if either input is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def lowercase_(number: int, shift_amount: int) -> str:
    """Arithmetic right shift on a two's-complement bit string: shift right by
    ``shift_amount`` while replicating the sign bit, returning a '0b...'-prefixed
    string.

    Fixes from the obfuscated original: both parameters were named ``__A``
    (a SyntaxError) and intermediate results were bound to throwaway locals
    while later lines read ``binary_number``/``binary_number_length``.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        # Everything shifted out: the result is all sign bits.
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
    # Run the shift functions' doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 8 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for ``BarkProcessor``: save/load round-trips, voice-preset handling,
    and tokenizer parity.

    NOTE(review): obfuscation artifacts — every method is named ``A__`` (so later
    definitions shadow earlier ones on the class; presumably these were distinct
    ``setUp``/``tearDown``/``test_*`` methods), and fixture values are bound to
    throwaway locals named ``lowercase`` rather than the ``self.checkpoint``/
    ``self.tmpdirname``/... attributes the other methods read. Confirm upstream.
    """

    def A__ ( self : int ) -> Union[str, Any]:
        """Fixture set-up: checkpoint id, temp dir, and voice-preset constants."""
        lowercase : List[Any] ='''ylacombe/bark-small'''
        lowercase : int =tempfile.mkdtemp()
        lowercase : Any ='''en_speaker_1'''
        lowercase : Union[str, Any] ='''This is a test string'''
        lowercase : Union[str, Any] ='''speaker_embeddings_path.json'''
        lowercase : Optional[Any] ='''speaker_embeddings'''

    def A__ ( self : Tuple , **UpperCAmelCase : Any ) -> Tuple:
        """Load a tokenizer from the test checkpoint, forwarding extra kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase )

    def A__ ( self : Optional[int] ) -> str:
        """Tear-down: delete the temporary directory."""
        shutil.rmtree(self.tmpdirname )

    def A__ ( self : Tuple ) -> Optional[Any]:
        """Save a processor, reload it, and check the vocabularies match."""
        lowercase : List[str] =self.get_tokenizer()
        lowercase : List[str] =BarkProcessor(tokenizer=UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        lowercase : List[Any] =BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def A__ ( self : List[Any] ) -> Any:
        """Save/load round-trip including speaker embeddings and extra special tokens."""
        lowercase : str =BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        lowercase : str =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        lowercase : Optional[Any] =BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def A__ ( self : List[Any] ) -> Union[str, Any]:
        """Voice presets: accept in-memory dicts, .npz files, and hub references."""
        lowercase : Any =BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        lowercase : Any =35
        lowercase : Optional[int] =2
        lowercase : Dict =8
        lowercase : List[str] ={
            '''semantic_prompt''': np.ones(UpperCAmelCase ),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        lowercase : Tuple =processor(text=self.input_string , voice_preset=UpperCAmelCase )
        lowercase : Optional[Any] =inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        lowercase : Optional[Any] =os.path.join(self.tmpdirname , '''file.npz''' )
        np.savez(UpperCAmelCase , **UpperCAmelCase )
        lowercase : str =processor(text=self.input_string , voice_preset=UpperCAmelCase )
        lowercase : List[str] =inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        lowercase : Optional[int] =processor(text=self.input_string , voice_preset=self.voice_preset )

    def A__ ( self : int ) -> List[Any]:
        """Processor text output must match direct tokenizer output key-by-key."""
        lowercase : Optional[Any] =self.get_tokenizer()
        lowercase : Dict =BarkProcessor(tokenizer=UpperCAmelCase )
        lowercase : List[str] =processor(text=self.input_string )
        lowercase : Any =tokenizer(
            self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 8 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:
    # Vision dependencies are missing: provide a do-nothing stand-in so the
    # module can still be imported (the tests below are skipped in that case).
    class UpperCAmelCase_ :
        """Placeholder substitute used when PIL/vision extras are unavailable."""

        @staticmethod
        def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
            """No-op stand-in; accepts any arguments and does nothing."""
            pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
    """Monkeypatch stub: accept a single argument and always return None."""
    result = None
    return result
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# NOTE(review): the test class below reads a name ``INVOICE_URL``, but this
# constant is bound to ``SCREAMING_SNAKE_CASE`` — confirm the intended name.
SCREAMING_SNAKE_CASE = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for document question answering over several backbones
    (LayoutLMv2, LayoutLM, Donut), with and without OCR word boxes.

    NOTE(review): obfuscation artifacts — every method is named ``A__`` (later
    definitions shadow earlier ones on the class), and results are bound to
    throwaway ``lowercase`` locals while later lines read the intended names
    (``dqa_pipeline``, ``image``, ``question``, ...). Confirm upstream.
    """
    UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
        """Build a pipeline plus example inputs (image, URL, and word-box variants)."""
        lowercase : Dict =pipeline(
            '''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : Optional[Any] =INVOICE_URL
        lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        lowercase : Dict ='''What is the placebo?'''
        lowercase : Optional[Any] =[
            {
                '''image''': load_image(UpperCAmelCase ),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
        """Run the pipeline on the examples and check the answer-dict shape only."""
        lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
        self.assertEqual(
            UpperCAmelCase , [
                [
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                ]
            ]
            * 3 , )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        """Tiny random LayoutLMv2: smoke-test outputs, empty images, and explicit boxes."""
        lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
        lowercase : Union[str, Any] =INVOICE_URL
        lowercase : Tuple ='''How many cats are there?'''
        lowercase : Optional[int] =[
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )
        # We can optionnally pass directly the words and bounding boxes
        lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Dict =[]
        lowercase : str =[]
        lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        """Fine-tuned LayoutLMv2 on DocVQA: check pinned answers for the invoice."""
        lowercase : Union[str, Any] =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
        lowercase : Dict =INVOICE_URL
        lowercase : str ='''What is the invoice number?'''
        lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2 , )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : Dict ) -> List[str]:
        """Same LayoutLMv2 checkpoint with max_seq_len=50: answers change accordingly."""
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
        lowercase : Dict =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Any =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : str ) -> Dict:
        """Impira LayoutLM: pinned answers; also works with image=None + word_boxes."""
        lowercase : Any =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : Tuple =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
        lowercase : Tuple =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : str =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2 , )
        lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : Dict ) -> Any:
        """Impira LayoutLM with max_seq_len=50: pinned answers; image=None variant too."""
        lowercase : Dict =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : List[Any] =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
        lowercase : str =INVOICE_URL
        lowercase : int ='''What is the invoice number?'''
        lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Union[str, Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
        lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

    @slow
    @require_torch
    def A__ ( self : List[Any] ) -> Optional[int]:
        """Donut (OCR-free) model: only the answer text is checked."""
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
        lowercase : Any =INVOICE_URL
        lowercase : Union[str, Any] ='''What is the invoice number?'''
        lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )

    @require_tf
    @unittest.skip('''Document question answering not implemented in TF''' )
    def A__ ( self : Any ) -> Any:
        """Placeholder: the TF backend has no document-QA implementation."""
        pass
| 8 | 1 |
'''simple docstring'''
def lowercase_(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell of a 0/1
    grid, moving in the four cardinal directions and never entering a ``1`` cell
    or revisiting a cell on the current path.

    Fixes from the obfuscated original: all four parameters were named ``__A``
    (a SyntaxError) while the body read the intended names
    ``grid``/``row``/``col``/``visit``.

    :param grid: rectangular matrix of 0 (open) / 1 (blocked) cells.
    :param row: current row index.
    :param col: current column index.
    :param visit: set of (row, col) pairs already on the current path (mutated
        during recursion but restored before returning).
    :return: the number of distinct simple paths to the bottom-right cell.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += lowercase_(grid, row + 1, col, visit)
    count += lowercase_(grid, row - 1, col, visit)
    count += lowercase_(grid, row, col + 1, visit)
    count += lowercase_(grid, row, col - 1, visit)

    # Backtrack so sibling branches may pass through this cell.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 8 |
'''simple docstring'''
def lowercase_(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of ``number``, optionally rounded
    to ``digit_amount`` decimal places (no rounding when ``digit_amount`` <= 0).

    Fixes from the obfuscated original: both parameters were named ``__A``
    (a SyntaxError); the logic itself is unchanged.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Smoke-check the decimal-isolation helper on positive, negative and zero inputs.
    # NOTE(review): ``decimal_isolate`` is not defined in this file — the function
    # above is named ``lowercase_`` (obfuscation artifact); confirm the intended name.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 8 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( __A ):
    """Parses the sequences/labels/template arguments for zero-shot classification.

    NOTE(review): obfuscation artifacts — the base class ``__A`` (originally an
    argument-handler base, presumably ``ArgumentHandler``) is undefined in this
    file, parameters share the name ``UpperCAmelCase``, and results are bound
    to throwaway ``lowercase`` locals while later lines read ``labels``/
    ``sequences``/``sequence_pairs``. Confirm upstream.
    """

    def A__ ( self : Tuple , UpperCAmelCase : Dict ) -> Optional[int]:
        """Split a comma-separated label string into a stripped, non-empty list;
        pass other values through unchanged."""
        if isinstance(UpperCAmelCase , UpperCAmelCase ):
            lowercase : List[Any] =[label.strip() for label in labels.split(''',''' ) if label.strip()]
        return labels

    def __call__( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> Dict:
        """Build one (premise, hypothesis) pair per sequence/label combination.

        Raises ValueError when sequences or labels are empty, or when the
        hypothesis template has no '{}' placeholder to receive a label.
        """
        if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0:
            raise ValueError('''You must include at least one label and at least one sequence.''' )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    '''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
                    '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
                ).format(UpperCAmelCase ) )
        if isinstance(UpperCAmelCase , UpperCAmelCase ):
            lowercase : int =[sequences]
        lowercase : Tuple =[]
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(UpperCAmelCase )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(__A )
class UpperCAmelCase_ ( __A ):
    """NLI-based zero-shot classification pipeline: scores candidate labels by
    posing each as an entailment hypothesis against the input sequence.

    NOTE(review): obfuscation artifacts — the base class and decorator argument
    ``__A`` (originally PIPELINE_INIT_ARGS / ChunkPipeline) are undefined in
    this file, parameters share the name ``UpperCAmelCase``, and results are
    bound to throwaway ``lowercase`` locals while later lines read the intended
    names. Confirm upstream.
    """

    def __init__( self : Any , UpperCAmelCase : Optional[int]=ZeroShotClassificationArgumentHandler() , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ) -> str:
        """Store the argument parser and warn when no 'entailment' label id is found."""
        lowercase : Union[str, Any] =args_parser
        super().__init__(*UpperCAmelCase , **UpperCAmelCase )
        if self.entailment_id == -1:
            logger.warning(
                '''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
                '''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )

    @property
    def A__ ( self : List[str] ) -> Union[str, Any]:
        """Index of the first label starting with 'entail' in the model config; -1 if absent."""
        for label, ind in self.model.config.labelaid.items():
            if label.lower().startswith('''entail''' ):
                return ind
        return -1

    def A__ ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : str=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[int]=TruncationStrategy.ONLY_FIRST , **UpperCAmelCase : str ) -> int:
        """Tokenize sequence pairs, falling back to no-truncation when the
        requested truncation target is shorter than the input allows."""
        lowercase : int =self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                '''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
                ''' `pad_token=eos_token`''' )
            lowercase : Optional[int] =self.tokenizer.eos_token
        try:
            lowercase : int =self.tokenizer(
                UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , )
        except Exception as e:
            if "too short" in str(UpperCAmelCase ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                lowercase : List[str] =self.tokenizer(
                    UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , padding=UpperCAmelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs

    def A__ ( self : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ) -> List[str]:
        """Sanitize call kwargs into (preprocess, forward, postprocess) parameter dicts;
        maps the deprecated `multi_class` kwarg onto `multi_label`."""
        if kwargs.get('''multi_class''' , UpperCAmelCase ) is not None:
            lowercase : Dict =kwargs['''multi_class''']
            logger.warning(
                '''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
                '''`multi_class` will be removed in a future version of Transformers.''' )
        lowercase : str ={}
        if "candidate_labels" in kwargs:
            lowercase : List[Any] =self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
        if "hypothesis_template" in kwargs:
            lowercase : str =kwargs['''hypothesis_template''']
        lowercase : Tuple ={}
        if "multi_label" in kwargs:
            lowercase : Any =kwargs['''multi_label''']
        return preprocess_params, {}, postprocess_params

    def __call__( self : Optional[int] , UpperCAmelCase : Union[str, List[str]] , *UpperCAmelCase : Dict , **UpperCAmelCase : Tuple , ) -> Optional[int]:
        """Classify sequences; a single positional extra is treated as candidate labels."""
        if len(UpperCAmelCase ) == 0:
            pass
        elif len(UpperCAmelCase ) == 1 and "candidate_labels" not in kwargs:
            lowercase : Optional[Any] =args[0]
        else:
            raise ValueError(f'Unable to understand extra arguments {args}' )
        return super().__call__(UpperCAmelCase , **UpperCAmelCase )

    def A__ ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]="This example is {}." ) -> Optional[int]:
        """Preprocess: yield one tokenized (sequence, hypothesis) model input per label."""
        lowercase , lowercase : Optional[int] =self._args_parser(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        for i, (candidate_label, sequence_pair) in enumerate(zip(UpperCAmelCase , UpperCAmelCase ) ):
            lowercase : Tuple =self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(UpperCAmelCase ) - 1,
                **model_input,
            }

    def A__ ( self : int , UpperCAmelCase : List[Any] ) -> Dict:
        """Forward: run the model on one candidate's inputs, carrying metadata through."""
        lowercase : Optional[int] =inputs['''candidate_label''']
        lowercase : Union[str, Any] =inputs['''sequence''']
        lowercase : Union[str, Any] ={k: inputs[k] for k in self.tokenizer.model_input_names}
        lowercase : List[str] =self.model(**UpperCAmelCase )
        lowercase : List[str] ={
            '''candidate_label''': candidate_label,
            '''sequence''': sequence,
            '''is_last''': inputs['''is_last'''],
            **outputs,
        }
        return model_outputs

    def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=False ) -> Union[str, Any]:
        """Postprocess: softmax the per-label logits (independently when multi_label,
        jointly otherwise) and return labels sorted by descending score."""
        lowercase : int =[outputs['''candidate_label'''] for outputs in model_outputs]
        lowercase : int =[outputs['''sequence'''] for outputs in model_outputs]
        lowercase : Tuple =np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
        lowercase : Union[str, Any] =logits.shape[0]
        lowercase : Tuple =len(UpperCAmelCase )
        lowercase : Optional[Any] =N // n
        lowercase : int =logits.reshape((num_sequences, n, -1) )
        if multi_label or len(UpperCAmelCase ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            lowercase : Union[str, Any] =self.entailment_id
            lowercase : Tuple =-1 if entailment_id == 0 else 0
            lowercase : Optional[int] =reshaped_outputs[..., [contradiction_id, entailment_id]]
            lowercase : int =np.exp(UpperCAmelCase ) / np.exp(UpperCAmelCase ).sum(-1 , keepdims=UpperCAmelCase )
            lowercase : Union[str, Any] =scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            lowercase : Union[str, Any] =reshaped_outputs[..., self.entailment_id]
            lowercase : List[str] =np.exp(UpperCAmelCase ) / np.exp(UpperCAmelCase ).sum(-1 , keepdims=UpperCAmelCase )
        lowercase : List[Any] =list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 8 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute ``(x0, y0, x1, y1)`` pixel box to the 0-1000 grid
    (relative to ``width``/``height``) used by LayoutLM-style models.

    NOTE(review): the original def was named ``lowercase_`` with all three
    parameters called ``__A`` (a SyntaxError); name and parameters were
    restored from the in-file call site and the body's references.
    """
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Empty words (and their coordinates) are dropped; boxes are converted from
    (left, top, width, height) to (x0, y0, x1, y1) and normalized to 0-1000.
    """
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    ocr_data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words, left, top, width, height = ocr_data['''text'''], ocr_data['''left'''], ocr_data['''top'''], ocr_data['''width'''], ocr_data['''height''']
    # filter empty words and corresponding coordinates; a set keeps membership tests O(1)
    irrelevant_indices = {idx for idx, word in enumerate(words ) if not word.strip()}
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = [[x, y, x + w, y + h] for x, y, w, h in zip(left , top , width , height )]
    # finally, normalize the bounding boxes
    normalized_boxes = [normalize_box(box , image_width , image_height ) for box in actual_boxes]
    # raise instead of assert: asserts are stripped under `python -O`
    if len(words ) != len(normalized_boxes ):
        raise ValueError('''Not as many words as there are bounding boxes''' )
    return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
    """LayoutLM-style image processor: optionally resizes, rescales and
    normalizes images and, when ``apply_ocr`` is enabled, runs Tesseract to
    extract words plus 0-1000-normalized bounding boxes.

    NOTE(review): this file appears machine-mangled.  The methods below
    declare the same parameter name (``UpperCAmelCase``) several times --
    a SyntaxError in Python -- and ``__init__`` binds its configuration to a
    local called ``lowercase`` instead of ``self.<attr>``, even though the
    last method reads ``self.do_resize``, ``self.size`` etc.  Parameter names
    and attribute assignments must be restored from the body references
    before this class can run.
    """

    # model input names produced by this processor
    UpperCamelCase_ = ['''pixel_values''']
    def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
        '''Record resize/rescale/normalize defaults and the OCR settings.'''
        super().__init__(**UpperCAmelCase )
        # NOTE(review): each `lowercase : ... = <value>` below is a local; the
        # right-hand names (do_resize, size, resample, ...) suggest these were
        # `self.<attr> = <param>` assignments originally -- TODO confirm.
        lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
        lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
        lowercase : Optional[Any] =do_resize
        lowercase : List[Any] =size
        lowercase : List[str] =resample
        lowercase : Dict =do_rescale
        lowercase : str =rescale_value
        lowercase : Optional[int] =do_normalize
        lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
        lowercase : List[Any] =apply_ocr
        lowercase : Union[str, Any] =ocr_lang
        lowercase : str =tesseract_config
    def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
        '''Resize an image to the (height, width) given in ``size`` (see class NOTE on parameters).'''
        lowercase : Tuple =get_size_dict(UpperCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
        lowercase : Optional[Any] =(size['''height'''], size['''width'''])
        return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
        '''Rescale pixel values by a scale factor (e.g. 1/255).'''
        return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
        '''Normalize an image with the given per-channel mean and std.'''
        return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
        '''Full pipeline: validate inputs, optionally OCR, then resize/rescale/normalize and batch.'''
        # per-call overrides fall back to the instance defaults
        lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
        lowercase : Tuple =size if size is not None else self.size
        lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
        lowercase : List[str] =resample if resample is not None else self.resample
        lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
        lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
        lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
        lowercase : Optional[int] =image_std if image_std is not None else self.image_std
        lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
        lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
        lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
        lowercase : str =make_list_of_images(UpperCAmelCase )
        if not valid_images(UpperCAmelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
        # All transformations expect numpy arrays.
        lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            lowercase : int =[]
            lowercase : Tuple =[]
            for image in images:
                lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
                words_batch.append(UpperCAmelCase )
                boxes_batch.append(UpperCAmelCase )
        if do_resize:
            lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
        if do_rescale:
            lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
        if do_normalize:
            lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
        lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
        # wrap into a BatchFeature; OCR output is attached as extra keys
        lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
        if apply_ocr:
            lowercase : int =words_batch
            lowercase : List[str] =boxes_batch
        return data
| 8 | 1 |
'''simple docstring'''
from math import pi
def arc_length(angle: float, radius: float) -> float:
    """Return the length of the arc subtending ``angle`` degrees on a circle
    of the given ``radius``.

    NOTE(review): the original def was named ``lowercase_`` with both
    parameters called ``__A`` (a SyntaxError); the name and the parameter
    names were restored from the in-file call site and the body's references.
    """
    return 2 * pi * radius * (angle / 3_6_0)


if __name__ == "__main__":
    print(arc_length(90, 10))
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
    """Helper that builds a ``DistilBertConfig`` plus dummy inputs and checks
    each TF DistilBert head's output shapes.

    NOTE(review): this file appears machine-mangled.  ``__init__`` binds its
    hyperparameters to a local ``lowercase`` (and references an undefined
    ``parent``) instead of assigning ``self.<attr>``, yet later methods read
    ``self.batch_size`` etc.; most ``create_and_check_*`` methods also declare
    the same parameter name (``UpperCAmelCase``) several times, which is a
    SyntaxError.  Restore attribute assignments and parameter names before use.
    """
    def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
        '''Store the parent test case and hard-coded config hyperparameters.'''
        lowercase : int =parent  # NOTE(review): `parent` is undefined; presumably the parameter
        lowercase : Any =13
        lowercase : Any =7
        lowercase : Optional[int] =True
        lowercase : Optional[int] =True
        lowercase : Tuple =False
        lowercase : Optional[Any] =True
        lowercase : Dict =99
        lowercase : Union[str, Any] =32
        lowercase : Union[str, Any] =2
        lowercase : Union[str, Any] =4
        lowercase : List[str] =37
        lowercase : str ='''gelu'''
        lowercase : Dict =0.1
        lowercase : List[Any] =0.1
        lowercase : List[str] =512
        lowercase : Optional[int] =16
        lowercase : Optional[Any] =2
        lowercase : List[str] =0.0_2
        lowercase : Any =3
        lowercase : Optional[Any] =4
        lowercase : int =None
    def A__ ( self : List[str] ) -> Dict:
        '''Build a config and random input tensors (ids, mask, labels).'''
        lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : Any =None
        if self.use_input_mask:
            lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : Union[str, Any] =None
        lowercase : Any =None
        lowercase : str =None
        if self.use_labels:
            lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
        lowercase : Dict =DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        '''Check TFDistilBertModel output shape for dict and list inputs.'''
        lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
        lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : List[str] =model(UpperCAmelCase )
        lowercase : str =[input_ids, input_mask]
        lowercase : Tuple =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
        '''Check the masked-LM head's logits shape.'''
        lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
        lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : Union[str, Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
        '''Check the QA head's start/end logits shapes.'''
        lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
        lowercase : int ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
        '''Check the sequence-classification head's logits shape.'''
        lowercase : Dict =self.num_labels
        lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
        lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : Union[str, Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
        '''Check the multiple-choice head: inputs are tiled across choices.'''
        lowercase : List[Any] =self.num_choices
        lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
        lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : Tuple ={
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        lowercase : Tuple =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
        '''Check the token-classification head's logits shape.'''
        lowercase : Dict =self.num_labels
        lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
        lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : str =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def A__ ( self : List[str] ) -> Dict:
        '''Split prepare_config_and_inputs into (config, inputs_dict) for common tests.'''
        lowercase : int =self.prepare_config_and_inputs()
        ((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
        lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
    """Common model/pipeline test suite wiring for the TF DistilBert family.

    NOTE(review): base classes are both ``__A`` (machine-mangled; presumably
    TFModelTesterMixin and PipelineTesterMixin from the imports above), and
    the class-level names ``UpperCamelCase_`` shadow one another -- each
    assignment overwrites the previous, so only the last survives.
    """
    # all TF model classes under test (presumably `all_model_classes` originally)
    UpperCamelCase_ = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    # pipeline-task -> model-class mapping (presumably `pipeline_model_mapping`)
    UpperCamelCase_ = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    def A__ ( self : Dict ) -> str:
        '''Create the model tester and the DistilBertConfig tester.'''
        lowercase : str =TFDistilBertModelTester(self )  # NOTE(review): name undefined in this file; tester class above is mangled to UpperCAmelCase_
        lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
    def A__ ( self : Union[str, Any] ) -> Dict:
        '''Run the shared config sanity tests.'''
        self.config_tester.run_common_tests()
    def A__ ( self : Optional[Any] ) -> Tuple:
        '''Exercise the base model shape check.'''
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
    def A__ ( self : Tuple ) -> Any:
        '''Exercise the masked-LM head check.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
    def A__ ( self : Optional[int] ) -> List[Any]:
        '''Exercise the question-answering head check.'''
        lowercase : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
    def A__ ( self : Any ) -> str:
        '''Exercise the sequence-classification head check.'''
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
    def A__ ( self : Optional[Any] ) -> List[str]:
        '''Exercise the multiple-choice head check.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
    def A__ ( self : str ) -> Union[str, Any]:
        '''Exercise the token-classification head check.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
    @slow
    def A__ ( self : List[Any] ) -> Dict:
        '''Smoke-test loading the first pretrained checkpoint from the hub.'''
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: compare a pretrained TF DistilBert forward pass
    against hard-coded reference activations."""
    @slow
    def A__ ( self : List[str] ) -> List[Any]:
        '''Run distilbert-base-uncased on a fixed id sequence and compare a 3x3 output slice.'''
        lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowercase : List[Any] =model(UpperCAmelCase )[0]
        # expected hidden-state shape: (batch=1, seq_len=6, hidden=768)
        lowercase : str =[1, 6, 768]
        self.assertEqual(output.shape , UpperCAmelCase )
        lowercase : Optional[int] =tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ] )
        # float comparison with absolute tolerance, not exact equality
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
| 8 | 1 |
'''simple docstring'''
import heapq
def lowercase_ ( graph : dict ) -> set[int]:
    """Greedy APPROX-VERTEX-COVER: repeatedly pick the vertex covering the
    most uncovered edges until no edges remain, and return the chosen set.

    NOTE(review): the original body pushed onto the function argument and
    referenced an undefined ``graph``; the references were restored so the
    heap is built from the adjacency dict.  The input adjacency lists are
    mutated in place.
    """
    queue : list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices


if __name__ == "__main__":
    import doctest
    doctest.testmod()
SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{lowercase_(SCREAMING_SNAKE_CASE)}""")
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ):
    """Compare an installed library's version (or an explicit ``Version``)
    against ``requirement_version`` using the comparator named by ``operation``.

    NOTE(review): the original def was named ``lowercase_`` with all three
    parameters called ``__A`` (a SyntaxError); the function name was restored
    from the in-file call below, the parameters from the body's references.

    Raises:
        ValueError: if ``operation`` is not a key of STR_OPERATION_TO_FUNC.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )


def lowercase_ ( operation : str , version : str ) -> bool:
    """Compare the installed torch version (module-level SCREAMING_SNAKE_CASE)
    against ``version`` via :func:`compare_versions`."""
    return compare_versions(SCREAMING_SNAKE_CASE , operation , version )
| 8 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ):
    """Compare an installed library's version (or an explicit ``Version``)
    against ``requirement_version`` using the comparator named by ``operation``.

    NOTE(review): the original def was named ``lowercase_`` with all three
    parameters called ``__A`` (a SyntaxError); the function name was restored
    from the in-file call below, the parameters from the body's references.

    Raises:
        ValueError: if ``operation`` is not a key of STR_OPERATION_TO_FUNC.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )


def lowercase_ ( operation : str , version : str ) -> bool:
    """Compare the installed torch version (module-level SCREAMING_SNAKE_CASE)
    against ``version`` via :func:`compare_versions`."""
    return compare_versions(SCREAMING_SNAKE_CASE , operation , version )
| 8 | 1 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE = {
'Salesforce/codegen-350M-mono': 2_048,
}
class UpperCAmelCase_ ( __A ):
    """Fast (tokenizers-backed) CodeGen tokenizer wrapper: byte-level BPE with
    optional prefix space, plus ``truncate_before_pattern`` post-processing
    on ``decode``.

    NOTE(review): this file appears machine-mangled.  The class attributes
    reference ``VOCAB_FILES_NAMES`` / ``PRETRAINED_VOCAB_FILES_MAP`` /
    ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``, which are undefined here (the
    module constants above were all collapsed onto ``SCREAMING_SNAKE_CASE``),
    several methods declare the same parameter name (``UpperCAmelCase``)
    multiple times -- a SyntaxError -- and locals are bound to ``lowercase``
    while later lines read the original names (e.g. ``model_id``).
    """
    UpperCamelCase_ = VOCAB_FILES_NAMES
    UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
    UpperCamelCase_ = CodeGenTokenizer  # slow tokenizer counterpart
    def __init__( self : Any , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : str="<|endoftext|>" , UpperCAmelCase : Dict="<|endoftext|>" , UpperCAmelCase : Any="<|endoftext|>" , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Union[str, Any] , ) -> List[Any]:
        '''Initialise the fast tokenizer; reject add_bos_token and sync add_prefix_space into the pre-tokenizer.'''
        super().__init__(
            UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
        # BOS insertion is unsupported by the fast backend -- fail loudly
        if kwargs.pop('''add_bos_token''' , UpperCAmelCase ):
            lowercase : List[str] =kwargs.pop('''name_or_path''' , '''''' )  # NOTE(review): later f-strings read `model_id`
            raise ValueError(
                '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                f'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
                f'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''' )
        # rebuild the pre-tokenizer if its stored add_prefix_space disagrees with the argument
        lowercase : List[str] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase ) != add_prefix_space:
            lowercase : List[Any] =getattr(UpperCAmelCase , pre_tok_state.pop('''type''' ) )
            lowercase : Tuple =add_prefix_space
            lowercase : Union[str, Any] =pre_tok_class(**UpperCAmelCase )
        lowercase : str =add_prefix_space
    def A__ ( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : str ) -> BatchEncoding:
        '''Batch encode; pretokenized input requires add_prefix_space=True.'''
        lowercase : List[str] =kwargs.get('''is_split_into_words''' , UpperCAmelCase )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : Optional[int] , *UpperCAmelCase : Any , **UpperCAmelCase : str ) -> BatchEncoding:
        '''Single encode; pretokenized input requires add_prefix_space=True.'''
        lowercase : str =kwargs.get('''is_split_into_words''' , UpperCAmelCase )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        '''Save the vocabulary files of the backend model to a directory.'''
        lowercase : Union[str, Any] =self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
        return tuple(UpperCAmelCase )
    def A__ ( self : Optional[int] , UpperCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , UpperCAmelCase : bool = False , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[List[str]] = None , **UpperCAmelCase : Dict , ) -> str:
        '''Decode ids to text, then optionally truncate at the first regex in truncate_before_pattern.'''
        lowercase : str =super().decode(
            token_ids=UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , **UpperCAmelCase , )
        if truncate_before_pattern is not None and len(UpperCAmelCase ) > 0:
            lowercase : List[str] =self.truncate(UpperCAmelCase , UpperCAmelCase )
        return decoded_text
    def A__ ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ) -> Optional[int]:
        '''Cut a completion at the 2nd `print`/`def` occurrence or the earliest matching terminal pattern.'''
        def find_re(UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
            # return the match start after `start_pos`, or -1 when no match
            lowercase : Union[str, Any] =pattern.search(UpperCAmelCase , UpperCAmelCase )
            return m.start() if m else -1
        lowercase : str =[re.compile(UpperCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
        lowercase : Any =list(re.finditer('''^print''' , UpperCAmelCase , re.MULTILINE ) )
        if len(UpperCAmelCase ) > 1:
            lowercase : List[str] =completion[: prints[1].start()]
        lowercase : Optional[int] =list(re.finditer('''^def''' , UpperCAmelCase , re.MULTILINE ) )
        if len(UpperCAmelCase ) > 1:
            lowercase : Optional[Any] =completion[: defs[1].start()]
        lowercase : Any =0
        lowercase : int =[
            pos for pos in [find_re(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) for terminal in terminals] if pos != -1
        ]
        if len(UpperCAmelCase ) > 0:
            return completion[: min(UpperCAmelCase )]
        else:
            return completion
| 8 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """Yield ``(job_title, company_name)`` pairs scraped from Indeed for the
    given ``location``.

    NOTE(review): the original body referenced an undefined ``url`` and the
    parameter name ``location``; the base URL is the module-level
    SCREAMING_SNAKE_CASE constant, and the parameter name was restored from
    the body's own reference.
    """
    soup = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE + location ).content , '''html.parser''' )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
        job_title = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
        company_name = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(lowercase_('Bangalore'), 1):
        print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 1 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a TensorFlow BigBird checkpoint into a PyTorch model directory.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint.
    :param big_bird_config_file: JSON file describing the model architecture.
    :param pytorch_dump_path: output directory for the converted PyTorch model.
    :param is_trivia_qa: if True, build the TriviaQA question-answering head
        instead of the pre-training heads.
    """
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config/output paths and run the conversion.
    # NOTE(review): the parser is bound to `SCREAMING_SNAKE_CASE` but used as
    # `parser`/`args` below — looks like mechanical renaming damage; confirm.
    SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--big_bird_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
    )
    SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Import structure consumed by `_LazyModule`: maps submodule name -> public names.
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not expose the modeling objects.
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers get the real (eager) imports.
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; the config classes below emit warnings/info through it.
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig(PretrainedConfig):
    """Configuration for the BLIP-2 vision encoder (ViT-style tower).

    Defaults mirror the `salesforce/blip2-opt-2.7b` checkpoint.
    """

    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it from a full BLIP-2 config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipaQFormerConfig(PretrainedConfig):
    """Configuration for the BLIP-2 Q-Former, the query transformer bridging the
    frozen vision encoder and the language model."""

    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # How often (in layers) the Q-Former cross-attends to the image features.
        self.cross_attention_frequency = cross_attention_frequency
        # Hidden size of the vision encoder the cross-attention reads from.
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load the Q-Former sub-config, unwrapping it from a full BLIP-2 config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipaConfig(PretrainedConfig):
    """Top-level BLIP-2 configuration composing the vision, Q-Former and text sub-configs."""

    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        """Build the composite config; missing sub-configs fall back to defaults.

        :param vision_config: dict for the vision encoder config (or None).
        :param qformer_config: dict for the Q-Former config (or None).
        :param text_config: dict for the language-model config (or None -> OPT).
        :param num_query_tokens: number of learned query tokens fed to the Q-Former.
        """
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # Q-Former cross-attends to the vision encoder, so keep the sizes in sync.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a BLIP-2 config directly from the three sub-config objects."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Import structure consumed by `_LazyModule`: maps submodule name -> public names.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: do not expose the modeling objects.
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers get the real (eager) imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    """Return True while `line` still belongs to the current code block.

    A block continues as long as the line is indented at least `indent`, is
    (near-)empty, or is the closing parenthesis of a multi-line signature.
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` in the diffusers library.

    :param object_name: dotted path such as `models.unet_2d.UNet2DModel`.
    :raises ValueError: if no matching module or object can be located.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
# Matches `# Copied from diffusers.<path>` comments (groups: indent, dotted path, options).
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
# Matches a single `Old->New [all-casing]` replacement directive.
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
# Matches `<FILL ...>` placeholders left in templated code.
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`.

    Returns the empty string when `code` has no non-empty line.
    """
    lines = code.split("\n")
    idx = 0
    # Skip leading empty lines.
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Run black + docstring styling over `code`, preserving any class-level indent.

    Indented snippets are wrapped in a dummy `class Bla:` so black accepts them,
    then the wrapper is stripped from the result.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    # NOTE(review): the original had the garbled `black.TargetVersion.PYaa` /
    # `line_length=1_1_9`; restored to the canonical check_copies settings.
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check that every `# Copied from` block in `filename` matches its original.

    :param filename: path of the file to check.
    :param overwrite: when True, rewrite out-of-sync copies in place.
    :return: list of `[object_name, start_line]` entries for each mismatch found.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja, objb, option = pattern.groups()
                theoretical_code = re.sub(obja, objb, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower(), objb.lower(), theoretical_code)
                    theoretical_code = re.sub(obja.upper(), objb.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    """Check all `# Copied from` blocks in the diffusers source tree.

    :param overwrite: when True, fix inconsistencies in place instead of raising.
    :raises Exception: listing every inconsistency when `overwrite` is False.
    """
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    # CLI entry point: `--fix_and_overwrite` rewrites out-of-sync copies in place.
    # NOTE(review): the parser is bound to `SCREAMING_SNAKE_CASE` but used as
    # `parser`/`args`/`check_copies` below — looks like renaming damage; confirm.
    SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    SCREAMING_SNAKE_CASE = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

# File names expected inside a pretrained tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

# Maximum input length the published checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) tokenizer for HerBERT.

    Special-token layout follows BERT/XLM conventions:
    single sequence  -> `<s> A </s>`
    pair of sequences -> `<s> A </s> B </s>`
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Add `<s>`/`</s>` special tokens around one sequence or a pair."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self,
        token_ids_a: List[int],
        token_ids_a_a: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the underlying tokenizers model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 8 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class UpperCAmelCase_ :
    """Builds tiny GPTNeoXJapanese configs and random inputs for the model tests below.

    NOTE(review): identifiers in this block (`UpperCAmelCase_`, `A_`, the repeated
    `UpperCAmelCase` parameter names and throw-away `lowercase` locals) look
    machine-mangled; confirm against the upstream `GPTNeoXJapaneseModelTester`
    before relying on the code as written.
    """

    def __init__( self : int , UpperCAmelCase : Dict , UpperCAmelCase : List[Any]=13 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : str=99 , UpperCAmelCase : int=32 , UpperCAmelCase : Optional[int]=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Optional[int]=0.0 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[Any]=512 , UpperCAmelCase : int=16 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : Any=3 , UpperCAmelCase : str=4 , UpperCAmelCase : Any=None , ) -> Any:
        '''Store the tiny-model hyper-parameters on the tester instance.'''
        lowercase : Optional[Any] =parent
        lowercase : str =batch_size
        lowercase : int =seq_length
        lowercase : Optional[Any] =is_training
        lowercase : Dict =use_input_mask
        lowercase : Tuple =use_token_type_ids
        lowercase : Union[str, Any] =use_labels
        lowercase : Optional[Any] =vocab_size
        lowercase : Tuple =hidden_size
        lowercase : int =num_hidden_layers
        lowercase : str =num_attention_heads
        lowercase : Dict =intermediate_multiple_size
        lowercase : Optional[Any] =hidden_act
        lowercase : Union[str, Any] =hidden_dropout
        lowercase : Optional[Any] =attention_dropout
        lowercase : Optional[int] =weight_tying
        lowercase : Optional[Any] =max_position_embeddings
        lowercase : Any =type_vocab_size
        lowercase : Any =type_sequence_label_size
        lowercase : int =initializer_range
        lowercase : List[Any] =num_labels
        lowercase : List[str] =num_choices
        lowercase : int =scope

    def A__ ( self : Tuple ) -> List[Any]:
        '''Create random input ids, optional attention mask and labels, plus a config.'''
        lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : Dict =None
        if self.use_input_mask:
            lowercase : List[Any] =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : Union[str, Any] =None
        if self.use_labels:
            lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        lowercase : List[Any] =self.get_config()
        return config, input_ids, input_mask, token_labels

    def A__ ( self : Optional[int] ) -> Any:
        '''Build a GPTNeoXJapaneseConfig from the stored hyper-parameters.'''
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )

    def A__ ( self : List[str] ) -> Any:
        '''Decoder variant of prepare_config_and_inputs (flips one flag to True).'''
        lowercase , lowercase , lowercase , lowercase : Any =self.prepare_config_and_inputs()
        lowercase : Optional[int] =True
        return config, input_ids, input_mask, token_labels

    def A__ ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] ) -> Optional[int]:
        '''Run the base model (with and without mask) and check the output shape.'''
        lowercase : Optional[int] =GPTNeoXJapaneseModel(config=A_ )
        model.to(A_ )
        model.eval()
        lowercase : Any =model(A_ , attention_mask=A_ )
        lowercase : Dict =model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A__ ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : int ) -> Any:
        '''Run the model configured as a decoder and check the output shape.'''
        lowercase : List[Any] =True
        lowercase : List[Any] =GPTNeoXJapaneseModel(A_ )
        model.to(A_ )
        model.eval()
        lowercase : Union[str, Any] =model(A_ , attention_mask=A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A__ ( self : str , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] ) -> Any:
        '''Run the causal-LM head with labels and check the logits shape.'''
        lowercase : Tuple =GPTNeoXJapaneseForCausalLM(config=A_ )
        model.to(A_ )
        model.eval()
        lowercase : List[str] =model(A_ , attention_mask=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def A__ ( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] ) -> List[Any]:
        '''Check that generation with cached past_key_values matches the no-cache path.'''
        lowercase : Any =True
        lowercase : List[str] =GPTNeoXJapaneseForCausalLM(config=A_ )
        model.to(A_ )
        model.eval()
        # first forward pass
        lowercase : str =model(A_ , attention_mask=A_ , use_cache=A_ )
        lowercase : Optional[int] =outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowercase : Optional[int] =ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase : Optional[int] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowercase : List[str] =torch.cat([input_ids, next_tokens] , dim=-1 )
        lowercase : Dict =torch.cat([input_mask, next_mask] , dim=-1 )
        lowercase : Union[str, Any] =model(A_ , attention_mask=A_ , output_hidden_states=A_ )
        lowercase : str =output_from_no_past['''hidden_states'''][0]
        lowercase : Any =model(
            A_ , attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
        # select random slice
        lowercase : Any =ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowercase : Dict =output_from_no_past[:, -3:, random_slice_idx].detach()
        lowercase : Any =output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) )

    def A__ ( self : List[Any] ) -> int:
        '''Return (config, inputs_dict) in the format the common tests expect.'''
        lowercase : Optional[int] =self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase : Tuple =config_and_inputs
        lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Model/pipeline/generation tests for GPTNeoXJapanese, plus one slow end-to-end
    generation check against the `abeja/gpt-neox-japanese-2.7b` checkpoint.

    NOTE(review): the base classes are mangled to `_SCREAMING_SNAKE_CASE` (presumably
    ModelTesterMixin / PipelineTesterMixin, both imported above) and attribute names
    to `UpperCamelCase_` — confirm against the upstream test file.
    """

    UpperCamelCase_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    UpperCamelCase_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    UpperCamelCase_ = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    UpperCamelCase_ = False

    def A__ ( self : List[Any] ) -> List[Any]:
        '''Create the model tester and a ConfigTester for the shared config checks.'''
        lowercase : List[str] =GPTNeoXJapaneseModelTester(self )
        lowercase : List[Any] =ConfigTester(self , config_class=A_ , hidden_size=37 )

    def A__ ( self : Union[str, Any] ) -> int:
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()

    def A__ ( self : Dict ) -> str:
        '''Check the base model output shape.'''
        lowercase , lowercase , lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(A_ , A_ , A_ )

    def A__ ( self : Union[str, Any] ) -> int:
        '''Check the model when used as a decoder.'''
        lowercase , lowercase , lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(A_ , A_ , A_ )

    def A__ ( self : Any ) -> Optional[int]:
        '''Check the decoder path when no attention mask is provided.'''
        lowercase , lowercase , lowercase , lowercase : List[Any] =self.model_tester.prepare_config_and_inputs_for_decoder()
        lowercase : List[Any] =None
        self.model_tester.create_and_check_model_as_decoder(A_ , A_ , A_ )

    def A__ ( self : List[str] ) -> Union[str, Any]:
        '''Check that cached past_key_values reproduce the no-cache outputs.'''
        lowercase , lowercase , lowercase , lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(A_ , A_ , A_ )

    def A__ ( self : Optional[Any] ) -> List[str]:
        '''Check the causal-LM head logits shape.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*A_ )

    @slow
    def A__ ( self : Optional[Any] ) -> Union[str, Any]:
        '''End-to-end: generate from Japanese prompts and compare to expected strings.'''
        lowercase : List[Any] ='''abeja/gpt-neox-japanese-2.7b'''
        lowercase : List[str] =['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
        lowercase : Dict =[
            '''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
            '''100年後に必要とされる会社は、「人」が中心の会社です。''',
            '''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
            '''国境の長いトンネルを抜けると、そこは雪国だった。''',
            '''美味しい日本食といえば、やっぱりお寿司ですよね。''',
        ]
        lowercase : Union[str, Any] =GPTNeoXJapaneseTokenizer.from_pretrained(A_ )
        lowercase : int =GPTNeoXJapaneseForCausalLM.from_pretrained(A_ )
        lowercase : Tuple =[]
        for prompt in prompts:
            lowercase : int =tokenizer(A_ , return_tensors='''pt''' ).input_ids
            lowercase : Tuple =model.generate(A_ , max_length=50 )
            lowercase : List[Any] =tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
            predicted_outputs += generated_string
        self.assertListEqual(A_ , A_ )
| 703 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
    @require_torch
    def A__ ( self : Tuple ) -> Optional[int]:
        '''Check TRANSFORMERS_OFFLINE=1 works after the files are cached.

        Runs a subprocess whose socket access is stubbed to raise, so any
        network attempt fails loudly; the script must still load everything
        from the local cache and print "success".
        '''
        # Python source fragments assembled into the child process script below.
        lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        '''
        lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        '''
        lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
        '''
        # Force fetching the files so that we can use the cache
        lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(UpperCAmelCase )
        BertModel.from_pretrained(UpperCAmelCase )
        BertTokenizer.from_pretrained(UpperCAmelCase )
        pipeline(task='''fill-mask''' , model=UpperCAmelCase )
        # baseline - just load from_pretrained with normal network
        lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        lowercase : Tuple =self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : Optional[Any] ='''1'''
        lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : str =self.get_env()
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Any ='''1'''
lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] ='''
from transformers import pipeline
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase : Tuple =self.get_env()
lowercase : Optional[int] ='''1'''
lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def A__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] ='''
from transformers import AutoModel
'''
lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : List[str] ='''1'''
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 8 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE = pytest.mark.integration
@pytest.mark.parametrize('path', ['paws', 'csv'])
def lowercase_ ( path , tmp_path ) -> None:
    """``inspect_dataset`` must copy the dataset script (and no ``__pycache__``)
    into the target directory.

    Fixed on review: the original declared two parameters both named ``__A``
    (a SyntaxError) and referenced the undefined name ``_lowercase``; the
    second argument is assumed to be pytest's ``tmp_path`` fixture.
    """
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path', ['accuracy'])
def lowercase_ ( path , tmp_path ) -> None:
    """``inspect_metric`` must copy the metric script (and no ``__pycache__``)
    into the target directory.

    Fixed on review: duplicate ``__A`` parameters and undefined ``_lowercase``.
    """
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    'path, config_name, expected_splits', [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ], )
def lowercase_ ( path , config_name , expected_splits ) -> None:
    """``get_dataset_config_info`` must report the config name and its splits.

    Fixed on review: duplicate ``__A`` parameters and undefined ``_lowercase``.
    """
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception', [
        ('paws', None, ValueError),
    ], )
def lowercase_ ( path , config_name , expected_exception ) -> None:
    """``get_dataset_config_info`` must raise for a dataset needing an explicit config.

    Fixed on review: duplicate ``__A`` parameters and undefined ``_lowercase``.
    """
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    'path, expected', [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ], )
def lowercase_ ( path , expected ) -> None:
    """``get_dataset_config_names`` must include the expected config name.

    Fixed on review: duplicate ``__A`` parameters and undefined ``_lowercase``.
    """
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config', [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ], )
def lowercase_ ( path , expected_configs , expected_splits_in_first_config ) -> None:
    """``get_dataset_infos`` must list all configs and the splits of the first one.

    Fixed on review: duplicate ``__A`` parameters and undefined ``_lowercase``.
    """
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits', [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ], )
def lowercase_ ( path , expected_config , expected_splits ) -> None:
    """``get_dataset_infos`` must contain the expected config with the expected splits.

    Fixed on review: duplicate ``__A`` parameters and undefined ``_lowercase``.
    """
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception', [
        ('paws', None, ValueError),
    ], )
def lowercase_ ( path , config_name , expected_exception ) -> None:
    """``get_dataset_split_names`` must raise for a dataset needing an explicit config.

    Fixed on review: duplicate ``__A`` parameters and undefined ``_lowercase``.
    """
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 704 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowercase_ ( __A : str ) -> int:
    """Evaluate a fully parenthesised arithmetic expression with Dijkstra's
    two-stack algorithm.

    Only single-digit operands are supported: each digit character is pushed as
    one operand, so multi-digit numbers are not parsed.

    Fixed on review: the original pushed/applied the entire input string
    (``__A``) instead of the current token ``i``, and applied the operator to
    the string as well.
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in __A:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis applies the top operator to the
            # top two operands (num1 is the deeper one, to keep - and / ordered)
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
    # RULE 5: the single remaining operand is the result
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo: evaluate a fully parenthesised expression (expected answer = 45).
    # Fixed on review: the original printed the undefined names ``equation``
    # and ``dijkstras_two_stack_algorithm``; the evaluator in this file is
    # named ``lowercase_``.
    SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
    print(f"""{SCREAMING_SNAKE_CASE} = {lowercase_(SCREAMING_SNAKE_CASE)}""")
| 8 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( lowercase__ ):
"""simple docstring"""
def __get__( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any=None ) -> Any:
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
lowercase : Optional[int] ="__cached_" + self.fget.__name__
lowercase : Optional[Any] =getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if cached is None:
lowercase : List[Any] =self.fget(__lowerCamelCase )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return cached
def lowercase_ ( __A : str ) -> int:
    """Convert a truthy/falsy string to 1/0 (like ``distutils.util.strtobool``).

    Accepts y/yes/t/true/on/1 and n/no/f/false/off/0, case-insensitively.

    Raises:
        ValueError: for any unrecognised value.

    Fixed on review: the original referenced the undefined name ``val`` and
    never used the lower-cased value for the membership tests.
    """
    val = __A.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'invalid truth value {val!r}')
def lowercase_ ( __A : Any ) -> bool:
    """Return True if ``__A`` is a framework tensor: a torch fx proxy, a torch
    or TF tensor, a JAX array/tracer, or a numpy ndarray.

    Frameworks are imported lazily and only probed when available.

    Fixed on review: every check used the undefined placeholder ``_A`` instead
    of the parameter.
    """
    if is_torch_fx_proxy(__A):
        return True
    if is_torch_available():
        import torch

        if isinstance(__A, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(__A, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(__A, (jnp.ndarray, Tracer)):
            return True
    return isinstance(__A, np.ndarray)
def lowercase_ ( __A : int ) -> Union[str, Any]:
"""simple docstring"""
return isinstance(_A , np.ndarray )
def lowercase_ ( __A : Any ) -> bool:
    """Public wrapper: True iff ``__A`` is a numpy ndarray (delegates to ``_is_numpy``).

    Fixed on review: the original passed the undefined placeholder ``_A``.
    NOTE(review): in this obfuscated file the helper is also named
    ``lowercase_``; the call target ``_is_numpy`` matches the upstream name.
    """
    return _is_numpy(__A)
def lowercase_ ( __A : List[str] ) -> Any:
"""simple docstring"""
import torch
return isinstance(_A , torch.Tensor )
def lowercase_ ( __A : Any ) -> bool:
    """True iff torch is installed and ``__A`` is a torch tensor.

    Fixed on review: the original passed the undefined placeholder ``_A``.
    """
    return False if not is_torch_available() else _is_torch(__A)
def lowercase_ ( __A : str ) -> Union[str, Any]:
"""simple docstring"""
import torch
return isinstance(_A , torch.device )
def lowercase_ ( __A : Any ) -> bool:
    """True iff torch is installed and ``__A`` is a ``torch.device``.

    Fixed on review: the original passed the undefined placeholder ``_A``.
    """
    return False if not is_torch_available() else _is_torch_device(__A)
def lowercase_ ( __A : int ) -> int:
"""simple docstring"""
import torch
if isinstance(_A , _A ):
if hasattr(_A , _A ):
lowercase : List[Any] =getattr(_A , _A )
else:
return False
return isinstance(_A , torch.dtype )
def lowercase_ ( __A : Any ) -> bool:
    """True iff torch is installed and ``__A`` is (the name of) a torch dtype.

    Fixed on review: the original passed the undefined placeholder ``_A``.
    """
    return False if not is_torch_available() else _is_torch_dtype(__A)
def lowercase_ ( __A : Any ) -> bool:
    """Return True iff ``__A`` is a TensorFlow tensor (TF imported lazily).

    Fixed on review: the original tested the undefined placeholder ``_A``.
    """
    import tensorflow as tf

    return isinstance(__A, tf.Tensor)
def lowercase_ ( __A : Any ) -> bool:
    """True iff TensorFlow is installed and ``__A`` is a TF tensor.

    Fixed on review: the original passed the undefined placeholder ``_A``.
    """
    return False if not is_tf_available() else _is_tensorflow(__A)
def lowercase_ ( __A : Any ) -> bool:
    """Return True iff ``__A`` is a *symbolic* TF tensor.

    Uses ``tf.is_symbolic_tensor`` when present (TF >= 2.14), otherwise falls
    back to an exact type check against ``tf.Tensor``.

    Fixed on review: the calls used the undefined placeholder ``_A``.
    """
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(__A)
    return type(__A) == tf.Tensor
def lowercase_ ( __A : Any ) -> bool:
    """True iff TensorFlow is installed and ``__A`` is a symbolic TF tensor.

    Fixed on review: the original passed the undefined placeholder ``_A``.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(__A)
def lowercase_ ( __A : Optional[int] ) -> List[str]:
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(_A , jnp.ndarray )
def lowercase_ ( __A : Any ) -> bool:
    """True iff flax/jax is installed and ``__A`` is a JAX ndarray.

    Fixed on review: the original passed the undefined placeholder ``_A``.
    """
    return False if not is_flax_available() else _is_jax(__A)
def lowercase_ ( __A : Any ) -> Any:
    """Recursively convert tensors/arrays and containers of them to plain
    Python objects (lists, dicts, scalars).

    Fixed on review: the recursive calls used the undefined name ``to_py_obj``
    (the function here is named ``lowercase_``) with the undefined placeholder
    ``_A``, and the dict branch dropped the values.  NOTE(review): the
    self-recursion via ``lowercase_`` assumes no later rebinding of that name
    in this (obfuscated) module before the call.
    """
    if isinstance(__A, (dict, UserDict)):
        return {k: lowercase_(v) for k, v in __A.items()}
    elif isinstance(__A, (list, tuple)):
        return [lowercase_(o) for o in __A]
    elif is_tf_tensor(__A):
        return __A.numpy().tolist()
    elif is_torch_tensor(__A):
        return __A.detach().cpu().tolist()
    elif is_jax_tensor(__A):
        return np.asarray(__A).tolist()
    elif isinstance(__A, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return __A.tolist()
    else:
        return __A
def lowercase_ ( __A : Any ) -> Any:
    """Recursively convert tensors and containers of them to numpy arrays.

    Fixed on review: the recursive calls used the undefined name ``to_numpy``
    (the function here is named ``lowercase_``) with the undefined placeholder
    ``_A``, and the dict branch dropped the values.  NOTE(review): the
    self-recursion via ``lowercase_`` assumes no later rebinding of that name
    in this (obfuscated) module before the call.
    """
    if isinstance(__A, (dict, UserDict)):
        return {k: lowercase_(v) for k, v in __A.items()}
    elif isinstance(__A, (list, tuple)):
        return np.array(__A)
    elif is_tf_tensor(__A):
        return __A.numpy()
    elif is_torch_tensor(__A):
        return __A.detach().cpu().numpy()
    elif is_jax_tensor(__A):
        return np.asarray(__A)
    else:
        return __A
class UpperCAmelCase_ ( lowercase__ ):
    # NOTE(review): the base name ``lowercase__`` is undefined in this file —
    # presumably a corrupted reference (the upstream ModelOutput derives from
    # ``OrderedDict``); confirm before running.
    """Dict-like container for model outputs: fields are readable both as
    attributes and as keys, ``None``-valued fields are skipped, and deletion /
    ``setdefault`` / ``pop`` / ``update`` are forbidden to keep it immutable.
    """

    def A__ ( self : Tuple ) -> Union[str, Any]:
        '''Post-init hook: validate the dataclass fields and register every
        non-None field value as a key/value pair.

        NOTE(review): ``__lowerCamelCase`` throughout this body is an undefined
        placeholder (likely corrupted references to ``class_fields`` /
        ``first_field`` / element parts), and the repeated ``lowercase`` locals
        shadow each other — the assignments below no longer reach the names
        (``class_fields``, ``first_field`` …) that the later lines read.
        Confirm every reference against the upstream implementation.
        '''
        lowercase : Optional[int] =fields(self )
        # Safety and consistency checks
        if not len(__lowerCamelCase ):
            raise ValueError(f'{self.__class__.__name__} has no fields.' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f'{self.__class__.__name__} should not have more than one required field.' )
        lowercase : str =getattr(self , class_fields[0].name )
        lowercase : List[Any] =all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(__lowerCamelCase ):
            if isinstance(__lowerCamelCase , __lowerCamelCase ):
                lowercase : Dict =first_field.items()
                lowercase : List[str] =True
            else:
                try:
                    lowercase : Dict =iter(__lowerCamelCase )
                    lowercase : Dict =True
                except TypeError:
                    lowercase : Optional[int] =False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(__lowerCamelCase ):
                    if (
                        not isinstance(__lowerCamelCase , (list, tuple) )
                        or not len(__lowerCamelCase ) == 2
                        or not isinstance(element[0] , __lowerCamelCase )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            lowercase : int =first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        lowercase : Optional[int] =element[1]
            elif first_field is not None:
                lowercase : str =first_field
        else:
            for field in class_fields:
                lowercase : str =getattr(self , field.name )
                if v is not None:
                    lowercase : Any =v

    def __delitem__( self : Tuple , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
        '''Forbidden: model outputs do not support item deletion.'''
        raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )

    def A__ ( self : Optional[int] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ) -> Any:
        '''Forbidden ``setdefault`` (NOTE(review): upstream names this method ``setdefault``).'''
        raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )

    def A__ ( self : Optional[int] , *UpperCAmelCase : Any , **UpperCAmelCase : int ) -> str:
        '''Forbidden ``pop`` (NOTE(review): upstream names this method ``pop``).'''
        raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )

    def A__ ( self : str , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ) -> Dict:
        '''Forbidden ``update`` (NOTE(review): upstream names this method ``update``).'''
        raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.' )

    def __getitem__( self : int , UpperCAmelCase : Any ) -> List[str]:
        '''Key access: string keys read from the dict view; int/slice keys index
        ``to_tuple()``.

        NOTE(review): ``__lowerCamelCase`` / ``k`` are undefined or corrupted
        placeholders for the key parameter here.
        '''
        if isinstance(__lowerCamelCase , __lowerCamelCase ):
            lowercase : List[str] =dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] ) -> Any:
        '''Keep the attribute and dict views in sync (attribute write also sets the key).'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(__lowerCamelCase , __lowerCamelCase )
        super().__setattr__(__lowerCamelCase , __lowerCamelCase )

    def __setitem__( self : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] ) -> Union[str, Any]:
        '''Keep the dict and attribute views in sync (item write also sets the attribute).'''
        super().__setitem__(__lowerCamelCase , __lowerCamelCase )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(__lowerCamelCase , __lowerCamelCase )

    def A__ ( self : str ) -> List[str]:
        '''Return all registered values as a tuple, in key order
        (NOTE(review): upstream names this method ``to_tuple``).'''
        return tuple(self[k] for k in self.keys() )
class UpperCAmelCase_ ( lowercase__ , lowercase__ ):
"""simple docstring"""
@classmethod
def A__ ( cls : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
raise ValueError(
f'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' )
class UpperCAmelCase_ ( lowercase__ ):
    # NOTE(review): base ``lowercase__`` is undefined here, and all three
    # members are bound to the same corrupted name ``UpperCamelCase_`` —
    # upstream these are the distinct padding strategies
    # LONGEST / MAX_LENGTH / DO_NOT_PAD; as written only the last assignment
    # survives.  Confirm against the upstream enum before running.
    """Padding strategies accepted by tokenizer ``padding=`` arguments."""
    UpperCamelCase_ = '''longest'''
    UpperCamelCase_ = '''max_length'''
    UpperCamelCase_ = '''do_not_pad'''
class UpperCAmelCase_ ( lowercase__ ):
    # NOTE(review): base ``lowercase__`` is undefined here, and all four
    # members are bound to the same corrupted name ``UpperCamelCase_`` —
    # upstream these are the distinct tensor types
    # PYTORCH / TENSORFLOW / NUMPY / JAX; as written only the last assignment
    # survives.  Confirm against the upstream enum before running.
    """Tensor framework identifiers used by ``return_tensors=`` arguments."""
    UpperCamelCase_ = '''pt'''
    UpperCamelCase_ = '''tf'''
    UpperCamelCase_ = '''np'''
    UpperCamelCase_ = '''jax'''
class UpperCAmelCase_ :
    """Enter a list of context managers on ``__enter__`` (via an ``ExitStack``)
    and unwind them all, in reverse order, on ``__exit__``.

    Fixed on review: the constructor stored its arguments in throwaway
    ``lowercase`` locals instead of instance attributes, so ``__enter__`` /
    ``__exit__`` failed with AttributeError.
    """

    def __init__( self , context_managers : List[ContextManager] ) -> None:
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__( self ) -> None:
        # Register every manager on the stack so a failure mid-way still
        # unwinds the ones already entered.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__( self , *args : Any , **kwargs : Any ) -> None:
        self.stack.__exit__(*args, **kwargs)
def lowercase_ ( __A : Any ) -> bool:
    """Return True if ``__A``'s forward/call signature declares
    ``return_loss`` with a default of ``True``.

    Fixed on review: every call used the undefined placeholder ``_A`` instead
    of the model-class argument.  NOTE(review): ``infer_framework`` is the
    upstream helper name; in this obfuscated file that helper is also renamed.
    """
    framework = infer_framework(__A)
    if framework == "tf":
        signature = inspect.signature(__A.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(__A.forward)  # PyTorch models
    else:
        signature = inspect.signature(__A.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def lowercase_ ( __A : Any ) -> List[str]:
    """Return the label-like parameter names of ``__A``'s forward/call
    signature (QA models additionally get start/end positions).

    Fixed on review: every call used the undefined placeholder ``_A`` instead
    of the model-class argument.  NOTE(review): ``infer_framework`` is the
    upstream helper name; in this obfuscated file that helper is also renamed.
    """
    model_name = __A.__name__
    framework = infer_framework(__A)
    if framework == "tf":
        signature = inspect.signature(__A.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(__A.forward)  # PyTorch models
    else:
        signature = inspect.signature(__A.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def lowercase_ ( d : MutableMapping , parent_key : str = "" , delimiter : str = "." ) -> dict:
    """Flatten a nested mapping into one level, joining keys with ``delimiter``.

    Empty sub-mappings are kept as leaf values (the recursion only descends
    into truthy mappings).

    Fixed on review: the original declared three parameters all named ``__A``
    (a SyntaxError) and the recursive call referenced undefined names.
    """
    def _flatten_dict(d : Any , parent_key : str = "" , delimiter : str = "." ):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                # descend into nested mappings, accumulating the joined key
                yield from _flatten_dict(v, key, delimiter)
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def lowercase_ ( working_dir : Any , use_temp_dir : bool = False ) -> Any:
    """Yield ``working_dir``, or a fresh temporary directory (removed on exit)
    when ``use_temp_dir`` is True.

    Fixed on review: the original declared two parameters both named ``__A``
    (a SyntaxError).
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def lowercase_ ( array : Any , axes : Any = None ) -> Any:
    """Framework-agnostic transpose for numpy/torch/TF/JAX arrays.

    Fixed on review: the original declared two parameters both named ``__A``
    (a SyntaxError) and dispatched on the undefined placeholder ``_A``.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f'Type not supported for transpose: {type(array)}.')
def lowercase_ ( array : Any , newshape : Any ) -> Any:
    """Framework-agnostic reshape for numpy/torch/TF/JAX arrays.

    Fixed on review: the original declared two parameters both named ``__A``
    (a SyntaxError) and dispatched on the undefined placeholder ``_A``.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f'Type not supported for reshape: {type(array)}.')
def lowercase_ ( array : Any , axis : Any = None ) -> Any:
    """Framework-agnostic squeeze for numpy/torch/TF/JAX arrays.

    Fixed on review: the original declared two parameters both named ``__A``
    (a SyntaxError) and dispatched on the undefined placeholder ``_A``.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f'Type not supported for squeeze: {type(array)}.')
def lowercase_ ( array : Any , axis : Any ) -> Any:
    """Framework-agnostic expand_dims for numpy/torch/TF/JAX arrays.

    Fixed on review: the original declared two parameters both named ``__A``
    (a SyntaxError) and dispatched on the undefined placeholder ``_A``.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f'Type not supported for expand_dims: {type(array)}.')
def lowercase_ ( __A : Any ) -> Any:
    """Framework-agnostic element count for numpy/torch/TF/JAX arrays.

    Fixed on review: every check used the undefined placeholder ``_A`` instead
    of the parameter.  NOTE(review): the error message says ``expand_dims`` in
    the original too — kept byte-identical since it is a runtime string.
    """
    if is_numpy_array(__A):
        return np.size(__A)
    elif is_torch_tensor(__A):
        return __A.numel()
    elif is_tf_tensor(__A):
        import tensorflow as tf

        return tf.size(__A)
    elif is_jax_tensor(__A):
        return __A.size
    else:
        raise ValueError(f'Type not supported for expand_dims: {type(__A)}.')
def lowercase_ ( auto_map : dict , repo_id : str ) -> dict:
    """Prefix every bare class reference in ``auto_map`` with ``repo_id`` so
    remote-code references resolve (``repo--module.Class``); values already
    containing ``--`` or ``None`` are left untouched.  Mutates and returns
    ``auto_map``.

    Fixed on review: the original declared two parameters both named ``__A``
    (a SyntaxError) and assigned the rewritten values to a throwaway local
    instead of back into the mapping.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f'{repo_id}--{v}' if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'{repo_id}--{value}'
    return auto_map
def lowercase_ ( __A : Dict ) -> List[str]:
"""simple docstring"""
for base_class in inspect.getmro(_A ):
lowercase : Optional[int] =base_class.__module__
lowercase : List[str] =base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'Could not infer framework from class {model_class}.' )
| 705 |
'''simple docstring'''
import re
def lowercase_ ( __A : str ) -> bool:
    """Validate an Indian mobile number: optional ``+91``/``0``/``91`` prefix
    followed by 10 digits starting with 7, 8 or 9.

    Fixed on review: the compiled pattern was never used (``re.search`` was
    called with the phone string as both arguments) and the comparison read
    the undefined name ``phone``.
    """
    pat = re.compile(R'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$')
    if match := pat.search(__A):
        # full-string anchors make any match a whole-string match
        return match.string == __A
    return False
if __name__ == "__main__":
    # Demo: validate a sample number.
    # Fixed on review: the original called the undefined name
    # ``indian_phone_validator``; the validator in this file is ``lowercase_``.
    print(lowercase_('+918827897895'))
| 8 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowerCamelCase ):
    # NOTE(review): the base name ``__lowerCamelCase`` is undefined in this
    # file — presumably a corrupted reference to ``ProcessorMixin`` (imported
    # above); confirm before running.
    """Processor combining a LayoutLMv3 image processor (optionally running OCR)
    with a LayoutLMv3 tokenizer into a single callable producing model inputs.
    """
    # NOTE(review): the three class attributes below are all bound to the same
    # corrupted name ``UpperCamelCase_`` — upstream these are the distinct
    # ``attributes`` / ``image_processor_class`` / ``tokenizer_class``; as
    # written only the last assignment survives.
    UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
    UpperCamelCase_ = '''LayoutLMv3ImageProcessor'''
    UpperCamelCase_ = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')

    def __init__( self : Any , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple=None , **UpperCAmelCase : Any ) -> List[Any]:
        '''Build the processor from an image processor and a tokenizer, with a
        deprecation path for the legacy ``feature_extractor`` kwarg.

        NOTE(review): both positional parameters are named ``UpperCAmelCase``
        (a SyntaxError as written), and the body reads ``kwargs`` /
        ``image_processor`` / ``tokenizer`` / ``feature_extractor`` /
        ``UpperCAmelCase_`` which are undefined or corrupted references —
        confirm against the upstream LayoutLMv3Processor.
        '''
        lowercase : str =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , UpperCAmelCase_ , )
            lowercase : Dict =kwargs.pop('''feature_extractor''' )
        lowercase : Optional[int] =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )

    def __call__( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : List[str] , ) -> int:
        '''Run the image processor (possibly with OCR) and the tokenizer, and
        merge their outputs (adding ``pixel_values``) into one encoding.

        NOTE(review): all parameters share the corrupted name ``UpperCAmelCase``
        and the body reads ``boxes`` / ``word_labels`` / ``text`` /
        ``text_pair`` / ``images`` / ``return_overflowing_tokens`` etc. —
        undefined as written; the structure mirrors the upstream processor.
        '''
        # verify input: user-supplied boxes/word_labels conflict with apply_ocr
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        # first, apply the image processor
        lowercase : Optional[Any] =self.image_processor(images=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowercase : Optional[Any] =[text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowercase : int =features['''words''']
        lowercase : int =self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # add pixel values
        lowercase : Any =features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            # duplicate images for overflowed samples so batch sizes line up
            lowercase : List[Any] =self.get_overflowing_images(UpperCAmelCase_ , encoded_inputs['''overflow_to_sample_mapping'''] )
        lowercase : Any =images
        return encoded_inputs

    def A__ ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] ) -> Any:
        '''Repeat each image once per overflowed sample, following
        ``overflow_to_sample_mapping`` (upstream: ``get_overflowing_images``).

        NOTE(review): parameter names are corrupted duplicates; the body reads
        ``overflow_to_sample_mapping`` / ``images`` which are undefined as
        written.
        '''
        lowercase : List[str] =[]
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f' {len(UpperCAmelCase_ )} and {len(UpperCAmelCase_ )}' )
        return images_with_overflow

    def A__ ( self : Tuple , *UpperCAmelCase : str , **UpperCAmelCase : int ) -> Optional[Any]:
        '''Forward to the tokenizer's ``batch_decode``.'''
        return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )

    def A__ ( self : Optional[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        '''Forward to the tokenizer's ``decode``.'''
        return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )

    @property
    def A__ ( self : Dict ) -> Optional[Any]:
        '''Names of the inputs this processor produces (upstream: ``model_input_names``).'''
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def A__ ( self : List[Any] ) -> Any:
        '''Deprecated alias for ``image_processor_class``.'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase_ , )
        return self.image_processor_class

    @property
    def A__ ( self : Any ) -> List[str]:
        '''Deprecated alias for ``image_processor``.'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase_ , )
        return self.image_processor
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase_ :
    """Test helper that holds hyper-parameters and builds XGLM configs and dummy inputs.

    NOTE(review): this file has been machine-renamed. Every ``__init__`` parameter
    shares the name ``UpperCAmelCase`` (duplicate argument names are a SyntaxError
    as written), every local is bound to ``lowercase`` instead of being stored on
    ``self`` (so ``self.batch_size`` etc. read below are never actually assigned
    here), and ``_lowercase`` used further down is undefined. Flagged rather than
    fixed because the intended names are not recoverable from this view.
    """

    # Class-level fixtures consumed by the shared test machinery.
    UpperCamelCase_ = XGLMConfig
    UpperCamelCase_ = {}
    UpperCamelCase_ = 'gelu'

    def __init__( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=14 , UpperCAmelCase : Any=7 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=True , UpperCAmelCase : str=99 , UpperCAmelCase : Dict=32 , UpperCAmelCase : int=2 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Optional[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Optional[int]=512 , UpperCAmelCase : Any=0.0_2 , ) -> str:
        """Record tester hyper-parameters (sizes, dropout rates, special token ids)."""
        lowercase : Any =parent
        lowercase : Tuple =batch_size
        lowercase : Dict =seq_length
        lowercase : List[str] =is_training
        lowercase : int =use_input_mask
        lowercase : Any =use_labels
        lowercase : str =vocab_size
        lowercase : Dict =d_model
        lowercase : str =num_hidden_layers
        lowercase : Optional[int] =num_attention_heads
        lowercase : List[Any] =ffn_dim
        lowercase : Optional[int] =activation_function
        lowercase : int =activation_dropout
        lowercase : Optional[Any] =attention_dropout
        lowercase : List[str] =max_position_embeddings
        lowercase : Optional[int] =initializer_range
        lowercase : str =None
        # NOTE(review): the three constants below are presumably the special
        # token ids consumed by get_config() (bos/eos/pad) — confirm upstream.
        lowercase : Any =0
        lowercase : Union[str, Any] =2
        lowercase : Optional[Any] =1

    def A__ ( self : Any ) -> int:
        """Load and return the reference XGLM-564M configuration from the Hub."""
        return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )

    def A__ ( self : Any ) -> Any:
        """Build dummy (config, input_ids, input_mask, head_mask) test inputs."""
        # Clip token ids into [0, 3] so sequences only use a handful of ids.
        lowercase : List[str] =tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        lowercase : Optional[Any] =None
        if self.use_input_mask:
            lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : str =self.get_config()
        # One head-mask value per (layer, head).
        lowercase : List[Any] =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def A__ ( self : Dict ) -> Any:
        """Construct an XGLMConfig from the stored hyper-parameters.

        NOTE(review): ``_lowercase`` below is a renaming artifact (undefined in
        this file); upstream these were boolean flags / ``False``-like values.
        """
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_lowercase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_lowercase , )

    def A__ ( self : Tuple ) -> Dict:
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        lowercase : str =self.prepare_config_and_inputs()
        (
            lowercase
        ) : Dict =config_and_inputs
        lowercase : Optional[Any] ={
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Common model tests for TFXGLMModel / TFXGLMForCausalLM.

    NOTE(review): machine-renamed file — both base classes are written as
    ``UpperCAmelCase_``; judging by the imports at the top of the file they are
    presumably TFModelTesterMixin and PipelineTesterMixin (TODO confirm).
    ``TFXGLMModelTester`` and ``_lowercase`` referenced below are undefined here
    for the same reason.
    """

    # Model classes and pipeline mapping exercised by the shared tests.
    UpperCamelCase_ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    UpperCamelCase_ = (TFXGLMForCausalLM,) if is_tf_available() else ()
    UpperCamelCase_ = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # Feature flags for the common test-suite (names lost to renaming).
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    UpperCamelCase_ = False

    def A__ ( self : Union[str, Any] ) -> List[str]:
        """Instantiate the model tester plus a ConfigTester (n_embd=37 keeps it tiny)."""
        lowercase : Optional[Any] =TFXGLMModelTester(self )
        lowercase : List[str] =ConfigTester(self , config_class=_lowercase , n_embd=37 )

    def A__ ( self : Any ) -> List[Any]:
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    @slow
    def A__ ( self : Optional[Any] ) -> int:
        """Smoke-test that the first pretrained checkpoint loads successfully."""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase : Any =TFXGLMModel.from_pretrained(_lowercase )
            self.assertIsNotNone(_lowercase )

    @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
    def A__ ( self : List[str] ) -> List[Any]:
        """Skipped: embedding resize test deferred until the upstream refactor lands."""
        super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration tests running real generation with facebook/xglm-564M.

    NOTE(review): machine-renamed file — ``_lowercase`` used throughout is
    undefined here; upstream each occurrence referred to one of the locals
    assigned just above it (all of which were collapsed to ``lowercase``).
    Runtime strings and reference outputs below are preserved verbatim.
    """

    @slow
    def A__ ( self : str , UpperCAmelCase : Union[str, Any]=True ) -> List[Any]:
        """Greedy generation from "The dog" must match the reference token ids."""
        lowercase : List[str] =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        lowercase : Tuple =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        lowercase : Any =[2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
        # fmt: on
        lowercase : Tuple =model.generate(_lowercase , do_sample=_lowercase , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , _lowercase )

    @slow
    def A__ ( self : Optional[int] ) -> List[str]:
        """Seeded sampling must reproduce a fixed continuation string."""
        lowercase : int =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        lowercase : Dict =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tf.random.set_seed(0 )
        lowercase : Any =tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
        lowercase : Any =tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            lowercase : List[str] =model.generate(_lowercase , do_sample=_lowercase , seed=[7, 0] )
        lowercase : Optional[Any] =tokenizer.decode(output_ids[0] , skip_special_tokens=_lowercase )
        lowercase : Optional[Any] =(
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(_lowercase , _lowercase )

    @slow
    def A__ ( self : Any ) -> Union[str, Any]:
        """Left-padded batched generation must match per-sentence generation."""
        lowercase : Optional[Any] =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        lowercase : Optional[Any] =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        lowercase : str ='left'
        # use different length sentences to test batching
        lowercase : str =[
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        lowercase : List[Any] =tokenizer(_lowercase , return_tensors='''tf''' , padding=_lowercase )
        lowercase : Tuple =inputs['input_ids']
        # Batched (padded) generation vs. one-at-a-time generation of each sentence.
        lowercase : Optional[int] =model.generate(input_ids=_lowercase , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
        lowercase : Optional[Any] =tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        lowercase : List[Any] =model.generate(input_ids=_lowercase , max_new_tokens=12 )
        lowercase : List[Any] =tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        lowercase : Tuple =model.generate(input_ids=_lowercase , max_new_tokens=12 )
        lowercase : Union[str, Any] =tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
        lowercase : Optional[Any] =tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowercase )
        lowercase : Tuple =tokenizer.decode(output_padded[0] , skip_special_tokens=_lowercase )
        lowercase : Dict =[
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(_lowercase , _lowercase )
        self.assertListEqual(_lowercase , [non_padded_sentence, padded_sentence] )
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __A ):
    """Processor pairing a LayoutLMv2 image processor with a LayoutXLM tokenizer.

    The image processor (optionally) runs OCR to get words + bounding boxes;
    the tokenizer then encodes text, boxes and labels into one BatchEncoding.

    NOTE(review): machine-renamed file — the base ``__A`` is undefined here
    (presumably ProcessorMixin, per the import above), every parameter of
    ``__init__``/``__call__`` shares the name ``UpperCAmelCase`` (a SyntaxError
    as written), and locals such as ``features``/``encoded_inputs`` are read
    after being collapsed to ``lowercase``. Flagged, not fixed.
    """

    # ProcessorMixin wiring: component attribute names and default classes.
    UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
    UpperCamelCase_ = '''LayoutLMv2ImageProcessor'''
    UpperCamelCase_ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')

    def __init__( self : List[str] , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , **UpperCAmelCase : Dict ) -> Optional[int]:
        """Validate components; accept deprecated ``feature_extractor`` kwarg as image processor."""
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , UpperCAmelCase , )
            lowercase : Any =kwargs.pop('''feature_extractor''' )
        lowercase : Dict =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(UpperCAmelCase , UpperCAmelCase )

    def __call__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Any , ) -> BatchEncoding:
        """Run the image processor, then the tokenizer, and merge both outputs.

        Raises ValueError when boxes/word_labels are supplied while the image
        processor's own OCR is enabled, or when overflowing tokens are requested
        without the offsets mapping.
        """
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
        # first, apply the image processor
        lowercase : Tuple =self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(UpperCAmelCase , UpperCAmelCase ):
                lowercase : Optional[Any] =[text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowercase : List[str] =features['''words''']
        # OCR words/boxes from the image processor are used as fallbacks when
        # the caller did not provide text/boxes explicitly.
        lowercase : Optional[Any] =self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
        # add pixel values
        lowercase : List[str] =features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            # Duplicate each image once per overflowed chunk of its text.
            lowercase : str =self.get_overflowing_images(UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
        lowercase : Dict =images
        return encoded_inputs

    def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
        """Repeat images according to overflow_to_sample_mapping (one image per chunk)."""
        lowercase : str =[]
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(UpperCAmelCase ) != len(UpperCAmelCase ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}' )
        return images_with_overflow

    def A__ ( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> int:
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )

    def A__ ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )

    @property
    def A__ ( self : List[str] ) -> List[str]:
        """Names of the inputs the downstream model expects."""
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def A__ ( self : int ) -> List[str]:
        """Deprecated alias for ``image_processor_class`` (warns, then delegates)."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
        return self.image_processor_class

    @property
    def A__ ( self : Dict ) -> List[str]:
        """Deprecated alias for ``image_processor`` (warns, then delegates)."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
        return self.image_processor
| 8 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : int ) -> int:
    """Normalize a video input into a batched list-of-lists of frames.

    Accepts either a batch of videos (list of lists of frames), a single video
    (list of frames), or a single frame, and always returns the batched
    ``[[frame, ...], ...]`` shape.

    Bug fix: the original body referenced the undefined names
    ``__lowerCAmelCase`` and ``videos`` while the parameter was ``__A``,
    so every call raised NameError. The parameter is now aliased once and
    used consistently; the external signature is unchanged.

    Raises:
        ValueError: if the input is none of the accepted shapes.
    """
    videos = __A  # alias the (machine-renamed) parameter to its intended name
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        # Already a batch of videos — return unchanged.
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        # A single video (list of frames) — wrap it in a batch dimension.
        return [videos]
    elif is_valid_image(videos ):
        # A single frame — treat as a one-frame video inside a batch.
        return [[videos]]
    raise ValueError(F'Could not make batched video from {videos}' )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
    def __init__( self : str , UpperCAmelCase : str = True , UpperCAmelCase : List[str] = None , UpperCAmelCase : Optional[int] = PILImageResampling.BILINEAR , UpperCAmelCase : List[Any] = True , UpperCAmelCase : int = None , UpperCAmelCase : Optional[int] = True , UpperCAmelCase : Optional[int] = 1 / 255 , UpperCAmelCase : List[Any] = True , UpperCAmelCase : Optional[int] = True , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str = None , **UpperCAmelCase : int , ) -> Dict:
        """Store resize / center-crop / rescale / normalize defaults for video preprocessing.

        NOTE(review): machine-renamed — all parameters share the name
        ``UpperCAmelCase`` (a SyntaxError as written), ``_lowerCAmelCase`` is
        undefined, and locals are all bound to ``lowercase`` instead of
        attributes on ``self``. The right-hand-side names (do_resize, size,
        resample, ...) show the intended parameter names; confirm upstream.
        """
        super().__init__(**_lowerCAmelCase )
        # Defaults: shortest-edge-256 resize, 224x224 center crop (per the literals below).
        lowercase : Optional[Any] =size if size is not None else {'''shortest_edge''': 256}
        lowercase : List[Any] =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
        lowercase : List[Any] =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        lowercase : List[Any] =get_size_dict(_lowerCAmelCase , param_name='''crop_size''' )
        lowercase : Dict =do_resize
        lowercase : int =size
        lowercase : str =do_center_crop
        lowercase : Any =crop_size
        lowercase : Dict =resample
        lowercase : Any =do_rescale
        lowercase : Optional[Any] =rescale_factor
        lowercase : Dict =offset
        lowercase : Any =do_normalize
        # Fall back to the ImageNet mean/std constants imported above.
        lowercase : Union[str, Any] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowercase : Tuple =image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple = PILImageResampling.BILINEAR , UpperCAmelCase : List[str] = None , **UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
lowercase : Any =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" in size:
lowercase : Dict =get_resize_output_image_size(_lowerCAmelCase , size['''shortest_edge'''] , default_to_square=_lowerCAmelCase )
elif "height" in size and "width" in size:
lowercase : List[Any] =(size['''height'''], size['''width'''])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def A__ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] = None , **UpperCAmelCase : str , ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] =get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(_lowerCAmelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def A__ ( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] = True , UpperCAmelCase : int = None , **UpperCAmelCase : Optional[int] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : List[str] =image.astype(np.floataa )
if offset:
lowercase : Optional[int] =image - (scale / 2)
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def A__ ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict = None , **UpperCAmelCase : Tuple , ) -> Any:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str = None , UpperCAmelCase : List[str] = None , UpperCAmelCase : int = None , UpperCAmelCase : str = None , UpperCAmelCase : Union[str, Any] = None , UpperCAmelCase : Optional[Any] = None , UpperCAmelCase : str = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : List[Any] = None , UpperCAmelCase : Tuple = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict = ChannelDimension.FIRST , ) -> List[str]:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowercase : int =to_numpy_array(_lowerCAmelCase )
if do_resize:
lowercase : Union[str, Any] =self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase )
if do_center_crop:
lowercase : List[str] =self.center_crop(_lowerCAmelCase , size=_lowerCAmelCase )
if do_rescale:
lowercase : str =self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase , offset=_lowerCAmelCase )
if do_normalize:
lowercase : Any =self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase )
lowercase : Optional[Any] =to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase )
return image
def A__ ( self : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Union[str, Any] = None , UpperCAmelCase : Tuple = None , UpperCAmelCase : Union[str, Any] = None , UpperCAmelCase : List[str] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Tuple = None , UpperCAmelCase : Any = None , UpperCAmelCase : Optional[Any] = None , UpperCAmelCase : List[Any] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Tuple = ChannelDimension.FIRST , **UpperCAmelCase : Optional[Any] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Any =do_resize if do_resize is not None else self.do_resize
lowercase : Any =resample if resample is not None else self.resample
lowercase : int =do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Dict =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Union[str, Any] =offset if offset is not None else self.offset
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[str] =image_mean if image_mean is not None else self.image_mean
lowercase : List[Any] =image_std if image_std is not None else self.image_std
lowercase : Optional[int] =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
lowercase : Optional[Any] =crop_size if crop_size is not None else self.crop_size
lowercase : List[str] =get_size_dict(_lowerCAmelCase , param_name='''crop_size''' )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Optional[Any] =make_batched(_lowerCAmelCase )
lowercase : Any =[
[
self._preprocess_image(
image=_lowerCAmelCase , do_resize=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , do_center_crop=_lowerCAmelCase , crop_size=_lowerCAmelCase , do_rescale=_lowerCAmelCase , rescale_factor=_lowerCAmelCase , offset=_lowerCAmelCase , do_normalize=_lowerCAmelCase , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase , data_format=_lowerCAmelCase , )
for img in video
]
for video in videos
]
lowercase : Optional[int] ={'''pixel_values''': videos}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
| 708 |
'''simple docstring'''
def solution( __A : int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
    """Return the largest prime factor of ``__A`` (Project Euler problem 3).

    Trial division: every prime factor found is divided out completely, so
    any value left above the square-root bound is itself the largest prime
    factor.

    Raises:
        TypeError: if ``__A`` cannot be cast to ``int``.
        ValueError: if the value is less than one.
    """
    try:
        n = int(__A )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            # i is prime here: every smaller factor was already divided out.
            ans = i
            n //= i
        i += 1
    if n > 1:
        # The remainder is a prime larger than sqrt of the original value.
        ans = n
    return int(ans )


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """Minimal FIFO queue used for the AVL tree's level-order rendering.

    ``head``/``tail`` are indices into ``data``; popped slots are never
    reclaimed, which is fine for the short-lived traversal use here.
    """

    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        """True when no un-popped items remain."""
        return self.head == self.tail

    def push(self, data: Any) -> None:
        """Append *data* at the tail."""
        self.data.append(data )
        self.tail = self.tail + 1

    def pop(self) -> Any:
        """Remove and return the head item (no underflow check)."""
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        """Number of items currently queued."""
        return self.tail - self.head

    def print_queue(self) -> None:
        """Debug dump: raw buffer, separator, then the live window."""
        print(self.data )
        print('''**************''' )
        print(self.data[self.head : self.tail] )
class MyNode:
    """A single AVL-tree node: payload, two children and a cached height."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        # Height of the subtree rooted here; a leaf has height 1.
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    """Height of *node*'s subtree; an empty subtree (``None``) has height 0."""
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    """Return the larger of *a* and *b*.

    Local helper kept for self-containment (and doctest parity) instead of
    the ``max`` builtin.
    """
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    """Single right rotation: promote *node*'s left child and return it as
    the new subtree root (fixes a left-left imbalance).

    NOTE(review): the printed label says "left rotation" while the code
    performs a right rotation (and vice versa in the function below).  The
    strings are kept as-is to preserve observable output — confirm intent
    before changing them.
    """
    print('''left rotation node:''' , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    # Recompute cached heights bottom-up: demoted node first, new root after.
    node_height = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(node_height )
    ret_height = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(ret_height )
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Single left rotation: promote *node*'s right child and return it as
    the new subtree root (fixes a right-right imbalance)."""
    print('''right rotation node:''' , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    node_height = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(node_height )
    ret_height = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(ret_height )
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    """Left-right double rotation: left-rotate the left child, then
    right-rotate *node* (fixes a left-right imbalance)."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )


def rl_rotation(node: MyNode) -> MyNode:
    """Right-left double rotation: right-rotate the right child, then
    left-rotate *node* (fixes a right-left imbalance)."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    """Insert *data* into the subtree rooted at *node* and rebalance on the
    way back up; returns the (possibly new) subtree root.

    Values equal to an existing node go to the right subtree.
    """
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    # Refresh this node's cached height after any structural change below it.
    new_height = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(new_height )
    return node
def get_right_most(root: MyNode) -> Any:
    """Return the maximum value in *root*'s subtree (its right-most node)."""
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    """Return the minimum value in *root*'s subtree (its left-most node)."""
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    """Delete *data* from the subtree rooted at *root* and rebalance.

    Returns the new subtree root, or ``None`` when the last node is removed.
    Prints ``No such data`` (and leaves the tree unchanged) when *data* is
    absent on the left search path.
    """
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Two children: copy in the in-order successor (left-most of the
            # right subtree), then delete that successor from the right side.
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('''No such data''' )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    # Rebalance if the deletion skewed this node's children by 2.
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    height = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(height )
    return root
class AVLtree:
    """A self-balancing binary search tree.

    Thin wrapper over the module-level ``insert_node``/``del_node`` helpers
    that tracks the current root node.
    """

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        """Height of the whole tree (0 when empty)."""
        return get_height(self.root )

    def insert(self, data: Any) -> None:
        """Insert *data*, printing a trace line first."""
        print('''insert:''' + str(data ) )
        self.root = insert_node(self.root , data )

    def del_node(self, data: Any) -> None:
        """Delete *data*; prints a message when the tree is empty."""
        print('''delete:''' + str(data ) )
        if self.root is None:
            print('''Tree is empty!''' )
            return
        self.root = del_node(self.root , data )

    def __str__(self, ) -> str:  # a level traversale, gives a more intuitive look on the tree
        """Render the tree level by level, padding missing children with
        ``*`` placeholders so every level is printed as a full row."""
        output = ""
        q = MyQueue()
        q.push(self.root )
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                # Placeholder keeps alignment; enqueue two dummies so the
                # next level stays a complete row.
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt = cnt + 1
            for i in range(100 ):
                if cnt == math.pow(2 , layer ) - 1:
                    # Finished a full level: drop to the next one.
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    """Run the module's doctests."""
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    # Demo: insert 0..9 in random order, print the tree, then delete them
    # all in a fresh random order.  NOTE(review): in the garbled original
    # these statements ran at import time and referenced undefined names;
    # they belong under the __main__ guard.
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return ``u * (u-1) * ... * (u-(p-1))`` — the falling product used by
    Newton's forward-difference interpolation formula.

    >>> ucal(1, 2)
    0
    >>> ucal(5, 3)
    60
    """
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Interactively run Newton's forward-difference interpolation.

    Reads ``n`` sample points (x values, then y values) and a target x from
    stdin, builds the forward-difference table in-place in ``y``, then
    prints the interpolated value.  Pure console I/O.
    """
    n = int(input('''enter the numbers of values: ''' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    # Normalized offset of the target from the first sample (assumes evenly
    # spaced x values).
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'the value at {value} is {summ}' )


if __name__ == "__main__":
    main()
| 8 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
SCREAMING_SNAKE_CASE = '</w>'
SCREAMING_SNAKE_CASE = '@@ '
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a sequence of symbols (strings of variable length), e.g.
    ``("a", "b", "c")`` -> ``{("a", "b"), ("b", "c")}``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
SCREAMING_SNAKE_CASE = {'facebook/s2t-wav2vec2-large-en-de': 1_024}
class UpperCAmelCase_ ( __A ):
    """Speech2Text2-style BPE tokenizer.  The merges file is optional: when
    absent the tokenizer can only decode (id -> token), not encode.

    NOTE(review): identifiers in this class are garbled — every method
    parameter is literally named ``UpperCAmelCase`` (duplicate parameter
    names are a SyntaxError), every method is named ``A__`` (later defs
    shadow earlier ones), and the bodies reference ``__A`` and various
    locals (``merges_file``, ``token``, ``word`` ...) that are never bound.
    The intended parameter names must be restored before this can run.
    """

    # NOTE(review): these four class attributes all rebind the same garbled
    # name; the intended attributes are vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes, model_input_names.
    UpperCamelCase_ = VOCAB_FILES_NAMES
    UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ = ['''input_ids''', '''attention_mask''']

    def __init__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : Tuple="<pad>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Dict="<unk>" , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : str=None , **UpperCAmelCase : Optional[Any] , ) -> Any:
        """Load the JSON vocab and, when provided, the BPE merges file.

        Intended parameters (by position): vocab_file, bos_token, pad_token,
        eos_token, unk_token, do_lower_case, merges_file.
        """
        super().__init__(
            unk_token=__A , bos_token=__A , eos_token=__A , pad_token=__A , do_lower_case=__A , **__A , )
        lowercase : int =do_lower_case
        with open(__A , encoding='''utf-8''' ) as vocab_handle:
            lowercase : List[Any] =json.load(__A )
        # Reverse mapping id -> token used for decoding.
        lowercase : str ={v: k for k, v in self.encoder.items()}
        if merges_file is None:
            # Decode-only mode: no merge ranks, no cache.
            logger.info(f'No merges files provided. {self.__class__.__name__} can only be used for decoding.' )
            lowercase : List[Any] =None
            lowercase : str =None
        else:
            with open(__A , encoding='''utf-8''' ) as merges_handle:
                lowercase : Optional[Any] =merges_handle.read().split('''\n''' )[:-1]
            lowercase : List[Any] =[tuple(merge.split()[:2] ) for merge in merges]
            # Rank table: earlier merges get lower rank (higher priority).
            lowercase : List[Any] =dict(zip(__A , range(len(__A ) ) ) )
            lowercase : Tuple ={}

    @property
    def A__ ( self : Optional[int] ) -> List[str]:
        """Vocabulary size (number of decodable ids)."""
        return len(self.decoder )

    def A__ ( self : Any ) -> Optional[int]:
        """Return the full vocab, including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def A__ ( self : int , UpperCAmelCase : List[Any] ) -> Optional[Any]:
        """Apply byte-pair merges to one token (result cached per token)."""
        # Mark the last symbol as word-final before merging.
        lowercase : Tuple =tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        lowercase : Optional[Any] =get_pairs(__A )
        if not pairs:
            return token
        while True:
            # Pick the highest-priority (lowest-rank) adjacent pair.
            lowercase : Any =min(__A , key=lambda UpperCAmelCase : self.bpe_ranks.get(__A , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowercase : Optional[Any] =bigram
            lowercase : Optional[Any] =[]
            lowercase : Any =0
            # Merge every occurrence of the chosen pair in a single pass.
            while i < len(__A ):
                try:
                    lowercase : Tuple =word.index(__A , __A )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowercase : Union[str, Any] =j
                if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowercase : List[str] =tuple(__A )
            lowercase : Dict =new_word
            if len(__A ) == 1:
                break
            else:
                lowercase : List[Any] =get_pairs(__A )
        lowercase : Dict =" ".join(__A )
        if word == "\n " + BPE_TOKEN_MERGES:
            lowercase : Any ="\n" + BPE_TOKEN_MERGES
        if word.endswith(__A ):
            # Drop the word-final marker, re-encode splits with the "@@ "
            # continuation marker.
            lowercase : List[str] =word.replace(__A , '''''' )
        lowercase : List[Any] =word.replace(''' ''' , __A )
        lowercase : Tuple =word
        return word

    def A__ ( self : Any , UpperCAmelCase : Optional[int] ) -> Tuple:
        """Split *text* into BPE sub-tokens; requires a merges file."""
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
        if self.do_lower_case:
            lowercase : List[Any] =text.lower()
        lowercase : Optional[int] =text.split()
        lowercase : Optional[int] =[]
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(__A ).split(''' ''' ) ) )
        return split_tokens

    def A__ ( self : int , UpperCAmelCase : str ) -> Any:
        """token -> id, falling back to the unk token's id."""
        return self.encoder.get(__A , self.encoder.get(self.unk_token ) )

    def A__ ( self : List[str] , UpperCAmelCase : int ) -> int:
        """id -> token, falling back to the unk token."""
        lowercase : List[Any] =self.decoder.get(__A , self.unk_token )
        return result

    def A__ ( self : int , UpperCAmelCase : List[str] ) -> List[str]:
        """Join sub-tokens back into a string, fusing "@@ " continuations."""
        lowercase : List[str] =" ".join(__A )
        # make sure @@ tokens are concatenated
        lowercase : int ="".join(string.split(__A ) )
        return string

    def A__ ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Optional[Any]:
        """Write vocab.json (and merges.txt, when ranks exist) into the
        target directory; returns the written file paths."""
        if not os.path.isdir(__A ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowercase : Union[str, Any] =os.path.join(
            __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowercase : Any =os.path.join(
            __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(__A , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + '''\n''' )
        lowercase : Dict =0
        if self.bpe_ranks is None:
            # Decode-only tokenizer: no merges to persist.
            return (vocab_file,)
        with open(__A , '''w''' , encoding='''utf-8''' ) as writer:
            # Merges are written in rank order; warn if ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowercase : Any =token_index
                writer.write(''' '''.join(__A ) + '''\n''' )
                index += 1
        return (vocab_file, merges_file)
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for ``AutoImageProcessor.from_pretrained``: hub checkpoints,
    local config files, error paths, ``trust_remote_code`` handling and
    custom processor registration.

    NOTE(review): identifiers are garbled — every test method is named
    ``A__`` (only the last definition would survive on the class), and the
    bodies use ``UpperCAmelCase`` both as a parameter name and as undefined
    locals, e.g. ``Path(UpperCAmelCase)`` where a ``tmpdirname`` local was
    clearly intended, and ``self.assertIsInstance(UpperCAmelCase,
    UpperCAmelCase)`` where a result and an expected class were intended.
    Documented as-is; intended names must be restored before running.
    """

    def A__ ( self : int ) -> Any:
        """Setup placeholder (the counter value is unused)."""
        lowercase : Union[str, Any] =0

    def A__ ( self : str ) -> Union[str, Any]:
        """Load an image processor directly from a hub checkpoint name."""
        lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : Any ) -> int:
        """Load from a local dir with preprocessor_config.json + config.json."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : Any ) -> Union[str, Any]:
        """Load from a local dir using the legacy feature_extractor_type key."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : Optional[Any] ) -> Any:
        """Resolve the processor class from config.json alone (no
        image_processor_type in the saved preprocessor config)."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : str =CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
            config_dict.pop('''image_processor_type''' )
            lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
            # save in new folder
            model_config.save_pretrained(UpperCAmelCase )
            config.save_pretrained(UpperCAmelCase )
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            # make sure private variable is not incorrectly saved
            lowercase : int =json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : str ) -> List[str]:
        """Load from a dir containing only preprocessor_config.json."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )

    def A__ ( self : int ) -> List[str]:
        """An invalid model identifier raises with a helpful message."""
        with self.assertRaisesRegex(
            UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
            lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )

    def A__ ( self : List[Any] ) -> List[str]:
        """An invalid revision raises with a helpful message."""
        with self.assertRaisesRegex(
            UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )

    def A__ ( self : Tuple ) -> Optional[int]:
        """A repo without a preprocessor config raises with a clear message."""
        with self.assertRaisesRegex(
            UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )

    def A__ ( self : List[str] ) -> str:
        """Dynamic (remote-code) processors require trust_remote_code and
        survive a save/reload round-trip."""
        with self.assertRaises(UpperCAmelCase ):
            lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCAmelCase ):
            lowercase : List[str] =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(UpperCAmelCase )
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )

    def A__ ( self : Union[str, Any] ) -> Dict:
        """Register a custom config/processor pair and load it via the auto
        API; mappings are cleaned up in ``finally``."""
        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCAmelCase ):
                AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
                lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
                lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(UpperCAmelCase )
                lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
                self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def A__ ( self : Any ) -> Any:
        """Local registration takes precedence over remote code unless
        trust_remote_code explicitly selects the hub version."""
        class UpperCAmelCase_ ( __A ):
            """Local stand-in processor; the flag below is presumably the
            garbled name of ``is_local`` — confirm against upstream."""

            UpperCamelCase_ = True

        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # If remote code is not set, the default is to use local
            lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            lowercase : Tuple =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            lowercase : Dict =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/test_sentencepiece.model')  # sample SentencePiece vocab used by setUp
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
    """Test suite for the SentencePiece-based Reformer tokenizer (slow and fast variants).

    NOTE(review): this file appears machine-mangled — every local was renamed to
    `lowercase` and most call arguments collapsed to `_a`, so several references
    below (`tokenizer`, `vocab_keys`, `sequence`, ...) no longer resolve as
    written. Code is kept byte-identical; only documentation was added.
    """
    # Tokenizer classes under test and mixin switches.
    UpperCamelCase_ = ReformerTokenizer
    UpperCamelCase_ = ReformerTokenizerFast
    UpperCamelCase_ = True
    UpperCamelCase_ = False
    UpperCamelCase_ = True
    def A__ ( self : Dict ) -> List[str]:
        '''Build a tokenizer from the fixture vocab and save it into the temp dir.'''
        super().setUp()
        lowercase : List[Any] =ReformerTokenizer(_a , keep_accents=_a )
        tokenizer.save_pretrained(self.tmpdirname )
    def A__ ( self : List[str] ) -> Optional[int]:
        '''Round-trip a single token ("<s>" <-> id 1) through both conversion directions.'''
        lowercase : str ="""<s>"""
        lowercase : Optional[int] =1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
    def A__ ( self : Optional[Any] ) -> List[str]:
        '''Check vocab ordering (first "<unk>", then "<s>", last "j") and size 1000.'''
        lowercase : Optional[int] =list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(_a ) , 1000 )
    def A__ ( self : Any ) -> Any:
        '''The vocab_size property must match the fixture vocab (1000 entries).'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def A__ ( self : Any ) -> Optional[int]:
        '''Slow and fast (Rust) tokenizers must agree on tokens and encoded ids.'''
        if not self.test_rust_tokenizer:
            return
        lowercase : List[str] =self.get_tokenizer()
        lowercase : Optional[Any] =self.get_rust_tokenizer()
        lowercase : int ="""I was born in 92000, and this is falsé."""
        lowercase : List[str] =tokenizer.tokenize(_a )
        lowercase : Any =rust_tokenizer.tokenize(_a )
        self.assertListEqual(_a , _a )
        lowercase : List[Any] =tokenizer.encode(_a , add_special_tokens=_a )
        lowercase : Union[str, Any] =rust_tokenizer.encode(_a , add_special_tokens=_a )
        self.assertListEqual(_a , _a )
        lowercase : str =self.get_rust_tokenizer()
        lowercase : List[str] =tokenizer.encode(_a )
        lowercase : Any =rust_tokenizer.encode(_a )
        self.assertListEqual(_a , _a )
    def A__ ( self : Union[str, Any] , UpperCAmelCase : int=15 ) -> Union[str, Any]:
        '''Padding to max_length must raise for every input shape when no pad token exists.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                lowercase : Dict =self.rust_tokenizer_class.from_pretrained(_a , **_a )
                # Simple input
                lowercase : List[Any] ="""This is a simple input"""
                lowercase : Optional[Any] =["""This is a simple input 1""", """This is a simple input 2"""]
                lowercase : int =("""This is a simple input""", """This is a pair""")
                lowercase : Dict =[
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' )
                # Simple input
                self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , )
                # Pair input
                self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' )
                # Pair input
                self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , )
    def A__ ( self : Optional[int] ) -> Optional[Any]:
        '''Intentionally a no-op override in the original suite.'''
        pass
    def A__ ( self : Optional[Any] ) -> Optional[int]:
        '''Spot-check tokenization, ids and back-conversion against known SentencePiece pieces.'''
        lowercase : List[Any] =ReformerTokenizer(_a , keep_accents=_a )
        lowercase : Any =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] , )
        lowercase : Any =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            _a , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        lowercase : Optional[int] =tokenizer.convert_tokens_to_ids(_a )
        self.assertListEqual(
            _a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        lowercase : Union[str, Any] =tokenizer.convert_ids_to_tokens(_a )
        # Out-of-vocab pieces ("9", "é") come back as "<unk>" after the round trip.
        self.assertListEqual(
            _a , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )
    @cached_property
    def A__ ( self : Dict ) -> Optional[Any]:
        '''Pretrained crime-and-punishment tokenizer shared by the @slow tests below.'''
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
    @slow
    def A__ ( self : Any ) -> Tuple:
        '''Known-good encoding of "Hello World!" for the pretrained vocab.'''
        lowercase : Any ="""Hello World!"""
        lowercase : List[str] =[126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
    @slow
    def A__ ( self : str ) -> Union[str, Any]:
        '''Known-good encoding of a long text where unknown symbols map to id 0 (<unk>).'''
        lowercase : int =(
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        lowercase : int =[
            108,
            265,
            24,
            111,
            4,
            258,
            156,
            35,
            28,
            275,
            3,
            259,
            297,
            260,
            84,
            4,
            35,
            110,
            44,
            8,
            259,
            91,
            268,
            21,
            11,
            209,
            274,
            109,
            266,
            277,
            117,
            86,
            93,
            315,
            258,
            278,
            258,
            277,
            258,
            0,
            258,
            288,
            258,
            319,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            287,
            258,
            315,
            258,
            289,
            258,
            278,
            99,
            269,
            266,
            262,
            8,
            259,
            241,
            4,
            217,
            230,
            268,
            266,
            55,
            168,
            106,
            75,
            193,
            266,
            223,
            27,
            49,
            26,
            282,
            25,
            264,
            299,
            19,
            26,
            0,
            258,
            277,
            117,
            86,
            93,
            176,
            183,
            270,
            11,
            262,
            42,
            61,
            265,
        ]
        self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
    @require_torch
    @slow
    def A__ ( self : str ) -> Tuple:
        '''Smoke-test that ReformerModel accepts this tokenizer's encoded outputs.'''
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        lowercase : Dict =list(self.big_tokenizer.get_vocab().keys() )[:10]
        lowercase : List[Any] =""" """.join(_a )
        lowercase : str =self.big_tokenizer.encode_plus(_a , return_tensors='''pt''' )
        lowercase : Tuple =self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
        lowercase : Optional[int] =ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        lowercase : Any =encoded_sequence["""input_ids"""].shape
        lowercase : Any =ReformerModel(_a )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**_a )
            model(**_a )
    @slow
    def A__ ( self : Optional[int] ) -> List[str]:
        '''Full integration check against a pinned revision of the pretrained repo.'''
        lowercase : Optional[int] ={"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        lowercase : Tuple =[
            """This is a very simple sentence.""",
            """The quick brown fox jumps over the lazy dog.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_a , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_a , sequences=_a , )
| 711 |
'''simple docstring'''
from __future__ import annotations
# Coulomb constant k (units: N * m^2 * C^-2).
COULOMBS_CONSTANT = 8.988e9
# Backward-compatible alias for the previous (mangled) module-level name.
SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT


def lowercase_(force: float, charge_a: float, charge_b: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law F = k * |q1 * q2| / d**2 for the single unknown.

    Exactly one of the four arguments must be 0; that quantity is computed
    from the other three and returned in a one-entry dict keyed by its name
    ("force", "charge1", "charge2" or "distance").

    Raises:
        ValueError: if not exactly one argument is 0, or if distance < 0.

    >>> lowercase_(0, 3, 5, 2000)
    {'force': 33705.0}
    """
    charge_product = abs(charge_a * charge_b)
    if (force, charge_a, charge_b, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if distance < 0:
        raise ValueError('''Distance cannot be negative''')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    if charge_a == 0:
        charge_a = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge_b)
        return {"charge1": charge_a}
    if charge_b == 0:
        charge_b = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge_a)
        return {"charge2": charge_b}
    if distance == 0:
        # d = sqrt(k * |q1*q2| / |F|)
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the OPT model family: each optional backend
# (torch / tf / flax) contributes its public symbols only when importable.
# NOTE(review): this file appears machine-mangled — every assignment target was
# renamed to SCREAMING_SNAKE_CASE, so the structures below overwrite each other
# and the `_import_structure` name referenced at the bottom is never defined.
# Code kept byte-identical; only comments added.
SCREAMING_SNAKE_CASE = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
# PyTorch symbols, only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE = [
        'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OPTForCausalLM',
        'OPTModel',
        'OPTPreTrainedModel',
        'OPTForSequenceClassification',
        'OPTForQuestionAnswering',
    ]
# TensorFlow symbols.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
# Flax symbols.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE = [
        'FlaxOPTForCausalLM',
        'FlaxOPTModel',
        'FlaxOPTPreTrainedModel',
    ]
# Static imports for type checkers only; at runtime a _LazyModule is installed instead.
if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
    import sys
    # Replace this module with a lazy proxy that imports submodules on attribute access.
    SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module logger.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# On-disk file names for the byte-level BPE vocabulary and merge rules.
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
# Hub URLs for each pretrained checkpoint's vocab/merges/tokenizer files.
SCREAMING_SNAKE_CASE = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}
# Maximum positional-embedding size per checkpoint.
SCREAMING_SNAKE_CASE = {
    'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_() -> Dict[int, str]:
    """Return a mapping from byte values (0-255) to printable unicode characters.

    Printable ASCII/Latin ranges map to themselves; the remaining bytes are
    shifted to code points starting at 256 so that no byte maps to whitespace
    or a control character (which would break byte-level BPE).
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    byte_values = printable[:]
    char_codes = printable[:]
    offset = 0
    for byte in range(2**8):
        if byte not in byte_values:
            byte_values.append(byte)
            # Map non-printable bytes to 256, 257, ... in encounter order.
            char_codes.append(2**8 + offset)
            offset += 1
    characters = [chr(code) for code in char_codes]
    return dict(zip(byte_values, characters))
def lowercase_(word) -> set:
    """Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (variable-length strings during BPE);
    each consecutive pair (word[i], word[i+1]) appears once in the result.
    An empty sequence yields an empty set instead of raising IndexError.
    """
    pairs = set()
    if not word:
        return pairs
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_ ( __A ):
    """Byte-level BPE tokenizer for LED (GPT-2/BART-style vocab.json + merges.txt),
    extended with `global_attention_mask` padding support in `_pad`.

    NOTE(review): this file appears machine-mangled — locals were renamed to
    `lowercase` and several parameters to `UpperCAmelCase`, so some reads below
    (`self.encoder`, `bpe_merges`, `index`, the `kv` lambda in save_vocabulary)
    reference names that are never assigned as written. Code kept byte-identical;
    only documentation added.
    """
    UpperCamelCase_ = VOCAB_FILES_NAMES
    UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
        '''Load vocab/merges files and set up byte-level BPE state (encoder/decoder,
        byte maps, merge ranks, cache, and the GPT-2 pre-tokenization regex).'''
        # Normalize all special tokens to AddedToken instances.
        lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
        lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
        lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
        lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
        lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
        lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
        super().__init__(
            errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
        with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
            lowercase : str =json.load(UpperCAmelCase )
        # decoder: id -> token, inverse of the vocab mapping.
        lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
        lowercase : Optional[int] =errors  # how to handle errors in decoding
        lowercase : Tuple =bytes_to_unicode()
        lowercase : int ={v: k for k, v in self.byte_encoder.items()}
        with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
            # First line of merges.txt is a version header; last line may be empty.
            lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
        lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
        # bpe_ranks: merge pair -> priority (lower rank merges first).
        lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
        # Per-token BPE result cache.
        lowercase : Optional[int] ={}
        lowercase : Any =add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def A__ ( self : Union[str, Any] ) -> Optional[int]:
        '''Size of the base vocabulary (without added tokens).'''
        return len(self.encoder )
    def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
        '''Full vocabulary: base encoder plus tokens added after loading.'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
        '''Apply BPE merges to a single pre-token and return the space-joined result (cached).'''
        if token in self.cache:
            return self.cache[token]
        lowercase : List[str] =tuple(UpperCAmelCase )
        lowercase : List[str] =get_pairs(UpperCAmelCase )
        if not pairs:
            return token
        while True:
            # Greedily merge the lowest-ranked adjacent pair until none remains.
            lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowercase , lowercase : Optional[int] =bigram
            lowercase : Union[str, Any] =[]
            lowercase : Optional[Any] =0
            while i < len(UpperCAmelCase ):
                try:
                    lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowercase : Optional[int] =j
                if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowercase : List[str] =tuple(UpperCAmelCase )
            lowercase : str =new_word
            if len(UpperCAmelCase ) == 1:
                break
            else:
                lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
        lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
        lowercase : Union[str, Any] =word
        return word
    def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        '''Pre-tokenize with the GPT-2 regex, byte-encode each piece, then apply BPE.'''
        lowercase : Dict =[]
        for token in re.findall(self.pat , UpperCAmelCase ):
            lowercase : Optional[int] =''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
        return bpe_tokens
    def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
        '''Token string -> id, falling back to the unk token's id.'''
        return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
    def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
        '''Id -> token string via the inverse vocab.'''
        return self.decoder.get(UpperCAmelCase )
    def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
        '''Join tokens and decode the byte-level characters back to text.'''
        lowercase : str =''''''.join(UpperCAmelCase )
        lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        '''Write vocab.json and merges.txt into `save_directory`; return both paths.'''
        if not os.path.isdir(UpperCAmelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowercase : Optional[Any] =os.path.join(
            UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowercase : List[Any] =os.path.join(
            UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
        lowercase : List[str] =0
        with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            # NOTE(review): the lambda parameter below was mangled; the body reads `kv`.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowercase : Any =token_index
                writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        '''Add special tokens: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair.'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase : Optional[int] =[self.cls_token_id]
        lowercase : List[Any] =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
        '''Mask marking special-token positions with 1 and sequence tokens with 0.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCAmelCase )) + [1]
        return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
    def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        '''Token-type ids are all zeros for this tokenizer (no segment distinction).'''
        lowercase : Dict =[self.sep_token_id]
        lowercase : Optional[int] =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
        '''Optionally prepend a space so the first word is BPE-encoded like a mid-sentence word.'''
        lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
            lowercase : Union[str, Any] =''' ''' + text
        return (text, kwargs)
    def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
        '''Standard padding plus LED-specific padding of `global_attention_mask` with -1.'''
        lowercase : Optional[int] =super()._pad(
            encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
        # Load from model defaults
        if return_attention_mask is None:
            lowercase : Tuple ='''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
            if needs_to_be_padded:
                lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase : List[str] =(
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase : Any =[-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
| 8 | 0 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowercase_(model_name) -> "VideoMAEConfig":
    """Build a VideoMAEConfig for `model_name`.

    Sets architecture sizes via set_architecture_configs, disables mean pooling
    for pretraining-only checkpoints, and for fine-tuned checkpoints installs
    the Kinetics-400 or Something-Something-v2 label maps from the Hub.

    NOTE(review): reconstructed from mangled code — config attribute names
    (`use_mean_pooling`, `num_labels`, `id2label`, `label2id`) follow the
    surviving assignments and VideoMAEConfig's documented fields; verify.

    Raises:
        ValueError: if a fine-tuned model name names neither dataset.
    """
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        # Pretraining checkpoints do not use mean pooling on top of the encoder.
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = '''huggingface/label-files'''
        if "kinetics" in model_name:
            config.num_labels = 4_0_0
            filename = '''kinetics400-id2label.json'''
        elif "ssv2" in model_name:
            config.num_labels = 1_7_4
            filename = '''something-something-v2-id2label.json'''
        else:
            raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def lowercase_(model_name, config) -> None:
    """Populate `config` with the encoder/decoder sizes implied by `model_name`.

    Recognizes "small", "large" and "huge" size tokens; "base" keeps the
    defaults already present on `config`; anything else raises ValueError.

    NOTE(review): attribute names/order reconstructed from the mangled value
    table (384/1536/12/16/12/3/192/768 etc.) and VideoMAEConfig's fields —
    verify against the upstream conversion script.
    """
    if "small" in model_name:
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 1_6
        config.decoder_num_hidden_layers = 1_2
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 1_9_2
        config.decoder_intermediate_size = 7_6_8
    elif "large" in model_name:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
        config.decoder_num_hidden_layers = 1_2
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 5_1_2
        config.decoder_intermediate_size = 2_0_4_8
    elif "huge" in model_name:
        config.hidden_size = 1_2_8_0
        config.intermediate_size = 5_1_2_0
        config.num_hidden_layers = 3_2
        config.num_attention_heads = 1_6
        config.decoder_num_hidden_layers = 1_2
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 6_4_0
        config.decoder_intermediate_size = 2_5_6_0
    elif "base" not in model_name:
        raise ValueError('''Model name should include either \"small\", \"base\", \"large\", or \"huge\"''' )
def lowercase_(name) -> str:
    """Map an original VideoMAE checkpoint parameter name to its HF equivalent.

    Applies an ordered sequence of substring rewrites; the order matters
    because later rules match text produced (or left behind) by earlier ones.
    """
    if "encoder." in name:
        name = name.replace('''encoder.''' , '''''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
    if "decoder.blocks" in name:
        name = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''videomae.encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name and "bias" not in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "attn" in name:
        # Only reachable for names still containing "attn" (i.e. bias entries).
        name = name.replace('''attn''' , '''attention.attention''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
    if "head" in name and "decoder" not in name:
        name = name.replace('''head''' , '''classifier''' )
    return name
def lowercase_(orig_state_dict, config):
    """Translate a VideoMAE checkpoint state dict to the HF parameter layout.

    Fused qkv projection weights are split into separate query/key/value
    entries (sliced along dim 0 by `hidden_size` / `decoder_hidden_size`);
    every other entry is re-inserted under its encoder-prefix-stripped key.
    The dict is rewritten in place and returned.

    NOTE(review): destination key names reconstructed from the mangled
    slicing code and the HF VideoMAE module layout; verify against upstream.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith('''encoder.''' ):
            key = key.replace('''encoder.''' , '''''' )
        if "qkv" in key:
            key_split = key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = '''decoder.decoder_layers.'''
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = '''videomae.encoder.layer.'''
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
        else:
            orig_state_dict[key] = val
    return orig_state_dict
def lowercase_():
    """Download the spaghetti-eating test clip from the Hub and return it
    as a list of frames (one array per frame from the stored .npy file)."""
    video_path = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    frames = np.load(video_path )
    return list(frames )
def lowercase_ ( checkpoint_url , pytorch_dump_folder_path , model_name , push_to_hub ) -> Union[str, Any]:
    """Convert an original VideoMAE checkpoint to the HF format and verify it.

    Fixes vs. the previous revision: all four parameters were named ``__A``
    (a SyntaxError) and the body referenced an undefined ``lowerCAmelCase__``
    everywhere a parameter or local was intended.

    Args:
        checkpoint_url: direct Google-Drive download URL of the original checkpoint.
        pytorch_dump_folder_path: output directory for the converted model, or None.
        model_name: one of the supported VideoMAE model names (see ``model_names``).
        push_to_hub: whether to push the converted model to the hub.
    """
    config = get_videomae_config(model_name )
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config )
    else:
        model = VideoMAEForPreTraining(config )
    # download original checkpoint, hosted on Google Drive
    output = '''pytorch_model.bin'''
    gdown.cached_download(checkpoint_url , output , quiet=False )
    files = torch.load(output , map_location='''cpu''' )
    if "model" in files:
        state_dict = files['''model''']
    else:
        state_dict = files['''module''']
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video , return_tensors='''pt''' )
    if "finetuned" not in model_name:
        # pretraining checkpoints also need the boolean mask of patches to reconstruct
        local_path = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        inputs['''bool_masked_pos'''] = torch.load(local_path )
    outputs = model(**inputs )
    logits = outputs.logits
    model_names = [
        '''videomae-small-finetuned-kinetics''',
        '''videomae-small-finetuned-ssv2''',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        '''videomae-base-short''',
        '''videomae-base-short-finetuned-kinetics''',
        '''videomae-base''',
        '''videomae-base-finetuned-kinetics''',
        '''videomae-large''',
        '''videomae-large-finetuned-kinetics''',
        '''videomae-huge-finetuned-kinetics''',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        '''videomae-base-short-ssv2''',
        '''videomae-base-short-finetuned-ssv2''',
        '''videomae-base-ssv2''',
        '''videomae-base-finetuned-ssv2''',
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307] )
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 1_7_4] )
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235] )
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493] )
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421] )
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 1_7_4] )
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266] )
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 1_7_4] )
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389] )
    else:
        raise ValueError(F'Model name not supported. Should be one of {model_names}' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 )
    else:
        print('''Logits:''' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1E-4 )
    print('''Logits ok!''' )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss , expected_loss , atol=1E-4 )
        print('''Loss ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        model.push_to_hub(model_name , organization='''nielsr''' )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the checkpoint conversion.
    # Fixes vs. the previous revision: the parser / parsed args were assigned
    # to SCREAMING_SNAKE_CASE while the body read undefined `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
        type=str,
        help=(
            'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
            ' download link.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/Users/nielsrogge/Documents/VideoMAE/Test',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 713 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
    """Fast polynomial multiplication using the radix-2 FFT (Cooley–Tukey).

    Polynomials are given as coefficient lists, lowest degree first.  The
    product is computed on construction and exposed as ``self.product`` as a
    list of (rounded) complex coefficients.

    Fixes vs. the previous revision: duplicate constructor parameter names
    (SyntaxError), ``np.loga`` (NameError) instead of ``np.log2``, the DFT
    corner case testing ``len(which)`` instead of ``len(dft)``, undefined loop
    bounds, lost ``len_A``/``len_B`` attribute assignments, private-method
    name mismatches (``self.__dft`` / ``self.__multiply``), and the swapped
    ``enumerate`` unpacking in ``__str__``.  ``mpmath.root`` was replaced by
    the equivalent ``np.exp(2j*pi/n)`` primitive root of unity.
    """

    def __init__( self , poly_a=None , poly_b=None ) -> None:
        # Copy inputs so the caller's lists are never mutated.
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]

        # Remove leading zero coefficients (keep at least one term so the
        # all-zero polynomial stays representable).
        while self.polyA[-1] == 0 and len(self.polyA ) > 1:
            self.polyA.pop()
        self.len_A = len(self.polyA )

        while self.polyB[-1] == 0 and len(self.polyB ) > 1:
            self.polyB.pop()
        self.len_B = len(self.polyB )

        # Add 0 to make lengths equal a power of 2 (>= deg(A*B) + 1).
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )

        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )

        # A complex primitive c_max_length-th root of unity, e^(2*pi*i/n),
        # used for the fourier transform.
        self.root = complex(np.exp(2j * np.pi / self.c_max_length ) )

        # The product
        self.product = self.__multiply()

    def __dft( self , which ) -> list:
        """Iterative radix-2 DFT of polyA (which == 'A') or polyB."""
        dft = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case: a single coefficient is its own transform.
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol )]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply( self ) -> list:
        """Pointwise-multiply the two DFTs, then invert the transform."""
        dft_a = self.__dft('''A''' )
        dft_b = self.__dft('''B''' )
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol )]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__( self ) -> str:
        """Human-readable dump of A, B and their product A*B."""
        a = '''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b = '''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c = '''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
        return f'{a}\n{b}\n{c}'
# Unit tests
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCAmelCase_ ( ProcessorMixin ):
    """Processor combining a Blip image processor with a main tokenizer and a
    Q-Former tokenizer.

    Fixes vs. the previous revision: the base class was the undefined name
    ``__lowerCAmelCase`` (``ProcessorMixin`` is what the file imports), the
    three class attributes all shared one name so two were lost, all method
    bodies referenced the undefined ``_UpperCamelCase``, and every method was
    named ``A__`` so later definitions shadowed earlier ones.
    """

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__( self , image_processor , tokenizer , qformer_tokenizer ) -> None:
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self ,
        images=None ,
        text=None ,
        add_special_tokens=True ,
        padding=False ,
        truncation=None ,
        max_length=None ,
        stride=0 ,
        pad_to_multiple_of=None ,
        return_attention_mask=None ,
        return_overflowing_tokens=False ,
        return_special_tokens_mask=False ,
        return_offsets_mapping=False ,
        return_token_type_ids=False ,
        return_length=False ,
        verbose=True ,
        return_tensors=None ,
        **kwargs ,
    ) -> BatchFeature:
        """Tokenize `text` with both tokenizers and preprocess `images`.

        The Q-Former encoding is stored under the ``qformer_input_ids`` /
        ``qformer_attention_mask`` keys; remaining tokenizer kwargs mirror the
        standard `PreTrainedTokenizer.__call__` signature.
        """
        if images is None and text is None:
            raise ValueError('''You have to specify at least images or text.''' )

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['''qformer_input_ids'''] = qformer_text_encoding.pop('''input_ids''' )
            encoding['''qformer_attention_mask'''] = qformer_text_encoding.pop('''attention_mask''' )

        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )

        return encoding

    def batch_decode( self , *args , **kwargs ):
        """Forward to the main tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the main tokenizer's `decode`."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def save_pretrained( self , save_directory , **kwargs ):
        """Save the processor; the Q-Former tokenizer goes in a subfolder."""
        if os.path.isfile(save_directory ):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , '''qformer_tokenizer''' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Load the processor, restoring the Q-Former tokenizer from its subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='''qformer_tokenizer''' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 714 |
'''simple docstring'''
def lowercase_ ( number: int , shift_amount: int ) -> str:
    """Logical left shift: append `shift_amount` zero bits to the binary form.

    Note: the result keeps the ``0b`` prefix from :func:`bin`.  Fixes vs. the
    previous revision: both parameters were named ``__A`` (a SyntaxError) and
    the body read undefined locals.

    Raises:
        ValueError: if either input is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def lowercase_ ( number: int , shift_amount: int ) -> str:
    """Logical right shift: drop the `shift_amount` lowest bits.

    Returns ``"0b0"`` when every bit is shifted out.  Fixes vs. the previous
    revision: duplicate ``__A`` parameter names (a SyntaxError) and undefined
    locals in the body.

    Raises:
        ValueError: if either input is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def lowercase_ ( number: int , shift_amount: int ) -> str:
    """Arithmetic right shift on a two's-complement bit string.

    The sign bit (leading ``0`` for non-negative, ``1`` for negative numbers)
    is replicated into the vacated positions.  Fixes vs. the previous
    revision: duplicate ``__A`` parameter names (a SyntaxError) and undefined
    locals in the body.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        # Every bit shifted out: the result is all copies of the sign bit.
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): both constants below share the name SCREAMING_SNAKE_CASE, so
# the config-map assignment clobbers the logger — these look like degraded
# identifiers (originally `logger` and a pretrained-config archive map).
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Map of pretrained Open-Llama model ids to their hosted config files.
SCREAMING_SNAKE_CASE = {
    's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration class for Open-Llama models.

    Fixes vs. the previous revision: the base class was the undefined name
    ``a__`` (``PretrainedConfig`` is what the file imports), every ``__init__``
    parameter was named ``UpperCAmelCase`` (a SyntaxError), the body read the
    undefined ``_A``, and the validation helper was named ``A__`` although it
    is invoked as ``self._rope_scaling_validation()``.
    """

    model_type = '''open-llama'''

    def __init__(
        self ,
        vocab_size=10_0000 ,
        hidden_size=4096 ,
        intermediate_size=1_1008 ,
        num_hidden_layers=32 ,
        num_attention_heads=32 ,
        hidden_act="silu" ,
        max_position_embeddings=2048 ,
        initializer_range=0.0_2 ,
        rms_norm_eps=1e-6 ,
        use_cache=True ,
        pad_token_id=0 ,
        bos_token_id=1 ,
        eos_token_id=2 ,
        tie_word_embeddings=False ,
        use_memory_efficient_attention=True ,
        hidden_dropout_prob=0.1 ,
        attention_dropout_prob=0.1 ,
        use_stable_embedding=True ,
        shared_input_output_embedding=True ,
        rope_scaling=None ,
        **kwargs ,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Accept the historical (misspelled) kwarg for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            '''use_memorry_efficient_attention''' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ) -> None:
        """Validate `rope_scaling`: a 2-key dict with a known type and factor > 1."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 715 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class UpperCAmelCase_ :
        """Stand-in used when vision dependencies (PIL) are unavailable.

        NOTE(review): in the non-degraded original this presumably shadows
        `Image` with a no-op `open` — confirm before relying on the names.
        """
        @staticmethod
        def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
            '''No-op placeholder; accepts and ignores any arguments.'''
            pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
    """Fallback image loader used when vision support is missing: always None."""
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# NOTE(review): the test methods below read `INVOICE_URL`; this constant's
# name appears degraded and was presumably `INVOICE_URL` originally.
SCREAMING_SNAKE_CASE = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for the `document-question-answering` task.

    NOTE(review): this block appears degraded — methods share the name `A__`
    (later defs shadow earlier ones), parameters are duplicated, and bodies
    read locals (`image`, `question`, `dqa_pipeline`, ...) that the obfuscated
    assignments no longer bind.  Documented as-is; code left byte-identical.
    """
    # Task-to-model mapping the pipeline test harness iterates over.
    UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
    @require_pytesseract
    @require_vision
    def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
        '''Build a document-QA pipeline plus three example payloads (URL image,
        loaded image, and image + precomputed word boxes).'''
        lowercase : Dict =pipeline(
            '''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : Optional[Any] =INVOICE_URL
        lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        lowercase : Dict ='''What is the placebo?'''
        lowercase : Optional[Any] =[
            {
                '''image''': load_image(UpperCAmelCase ),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
        '''Run the pipeline on the prepared examples and check the answer schema.'''
        lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
        self.assertEqual(
            UpperCAmelCase , [
                [
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                ]
            ]
            * 3 , )
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        '''Smoke-test a tiny random LayoutLMv2 model, including empty-image and
        explicit words/boxes inputs.'''
        lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
        lowercase : Union[str, Any] =INVOICE_URL
        lowercase : Tuple ='''How many cats are there?'''
        lowercase : Optional[int] =[
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )
        # We can optionnally pass directly the words and bounding boxes
        lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Dict =[]
        lowercase : str =[]
        lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        '''Check fine-tuned LayoutLMv2 (DocVQA) answers on the pinned invoice,
        for single, dict, and batched inputs.'''
        lowercase : Union[str, Any] =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
        lowercase : Dict =INVOICE_URL
        lowercase : str ='''What is the invoice number?'''
        lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2 , )
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : Dict ) -> List[str]:
        '''Same fine-tuned LayoutLMv2 checks but with max_seq_len=50 (chunked).'''
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
        lowercase : Dict =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Any =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : str ) -> Dict:
        '''Check the LayoutLM (v1) document-QA model, including the image=None
        path with precomputed word boxes.'''
        lowercase : Any =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : Tuple =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
        lowercase : Tuple =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : str =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2 , )
        lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : Dict ) -> Any:
        '''Same LayoutLM checks but with max_seq_len=50 (chunked inference).'''
        lowercase : Dict =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : List[Any] =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
        lowercase : str =INVOICE_URL
        lowercase : int ='''What is the invoice number?'''
        lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Union[str, Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
        lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
    @slow
    @require_torch
    def A__ ( self : List[Any] ) -> Optional[int]:
        '''Check the Donut (OCR-free) document-QA model on the pinned invoice.'''
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
        lowercase : Any =INVOICE_URL
        lowercase : Union[str, Any] ='''What is the invoice number?'''
        lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
    @require_tf
    @unittest.skip('''Document question answering not implemented in TF''' )
    def A__ ( self : Any ) -> Any:
        '''Placeholder: the task has no TensorFlow implementation.'''
        pass
| 8 | 0 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class UpperCAmelCase_ :
    """State carried by the Flax DDPM scheduler.

    Fixes vs. the previous revision: the four fields were all assigned to one
    placeholder name (``UpperCamelCase_ = 42``), the factory classmethod was
    named ``A__`` although callers invoke ``.create(...)``, and its body
    referenced the undefined ``__lowerCamelCase`` instead of its parameters.
    """

    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create( cls , common: CommonSchedulerState , init_noise_sigma: jnp.ndarray , timesteps: jnp.ndarray ):
        """Build a fresh scheduler state (num_inference_steps left unset)."""
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    """Output of ``FlaxDDPMScheduler.step``: the base output plus updated state.

    NOTE(review): the original declared ``class UpperCAmelCase_(_A)`` with the
    field mangled to ``UpperCamelCase_ = 42``.  ``_A`` is undefined in this
    file, while ``FlaxSchedulerOutput`` is imported at the top and
    ``FlaxDDPMSchedulerOutput(prev_sample=..., state=...)`` is instantiated in
    ``step`` below, grounding both the base class and the field name.
    """

    state: "DDPMSchedulerState"
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Flax/JAX port of the DDPM scheduler (Ho et al., https://arxiv.org/abs/2006.11239).

    All mutable quantities live in an immutable ``DDPMSchedulerState`` that is
    threaded through every call, keeping the methods jit-friendly.

    NOTE(review): reconstructed from a machine-mangled original in which all
    five methods shared the name ``A__`` (each definition shadowing the
    previous one), ``__init__`` declared nine parameters all named
    ``UpperCAmelCase`` (a SyntaxError), and locals were bound to ``lowercase``
    but read under other names.  Restored names are grounded in the surviving
    body: ``self._get_variance`` is called inside ``step``,
    ``DDPMSchedulerState.create``/``state.replace`` are used as below, and
    ``add_noise_common``/``get_velocity_common`` (imported above) fix the last
    two signatures.
    """

    # Names of schedulers this one is config-compatible with.
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self) -> bool:
        # This scheduler carries explicit functional state (see create_state).
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        # Everything except dtype is captured by @register_to_config into self.config.
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        """Build the initial scheduler state (beta/alpha tables + timestep grid)."""
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        """DDPM needs no model-input scaling; return *sample* unchanged."""
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        """Return a new state whose timesteps are subsampled for inference."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        """Posterior variance at step *t* (formulas (6)/(7) of the DDPM paper)."""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between min/max variance using the model's prediction.
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """One reverse-diffusion update x_t -> x_{t-1}.

        Returns a ``FlaxDDPMSchedulerOutput`` (or a ``(prev_sample, state)``
        tuple when ``return_dict`` is False).
        """
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            # NOTE(review): the mangled message was truncated; restored the
            # conventional wording listing all three supported modes.
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise (only for t > 0; the final step is deterministic)
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Forward-diffuse *original_samples* to the given *timesteps*."""
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Velocity target used for v-prediction training."""
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
| 716 |
'''simple docstring'''
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__A ) , __A )
return number - int(__A )
if __name__ == "__main__":
    # Demonstrate the helper on the same sample inputs, in the same order,
    # as the original hand-written print statements.
    _samples = [
        (1.53, 0),
        (35.345, 1),
        (35.345, 2),
        (35.345, 3),
        (-14.789, 3),
        (0, 2),
        (-14.123, 1),
        (-14.123, 2),
        (-14.123, 3),
    ]
    for _number, _digits in _samples:
        print(decimal_isolate(_number, _digits))
| 8 | 0 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a "masked" BERT model (BERT plus pruning-mask options).

    NOTE(review): reconstructed from a mangled original in which all sixteen
    constructor parameters were named ``UpperCAmelCase`` (duplicate argument
    names -> SyntaxError), the base class was the undefined alias
    ``_UpperCAmelCase`` (``PretrainedConfig`` is imported at the top of this
    file), and the ``model_type`` class attribute had been renamed.  Parameter
    names are grounded in the surviving attribute assignments; their order is
    grounded in the surviving default values.
    """

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Pruning-specific options (see movement-pruning / "masked" BERT).
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute ``(left, top, right, bottom)`` pixel box to the
    0-1000 coordinate system used by LayoutLM-style models.

    NOTE(review): the original declared all three parameters as ``__A``
    (duplicate argument name -> SyntaxError) and was named ``lowercase_``
    even though ``apply_tesseract`` below calls ``normalize_box``; the
    definition is renamed to match its caller.
    """
    return [
        int(1_0_0_0 * (box[0] / width)),
        int(1_0_0_0 * (box[1] / height)),
        int(1_0_0_0 * (box[2] / width)),
        int(1_0_0_0 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on *image*; return ``(words, normalized_boxes)``.

    Boxes are converted from Tesseract's ``(left, top, width, height)`` to
    ``(left, top, right, bottom)`` and normalized to 0-1000 coordinates.

    NOTE(review): the original declared all three parameters as ``__A``
    (duplicate argument name -> SyntaxError) and bound every local to
    ``lowercase``; names below are restored from the surviving expressions.
    The filtering also used list membership (O(n) per test); a set is used
    instead.
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']

    # filter empty words and corresponding coordinates
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
    """LayoutLM-style image processor: resize/rescale/normalize document images
    and optionally run Tesseract OCR to extract words plus normalized boxes.

    NOTE(review): this block is machine-mangled. ``__A`` as a base class is
    undefined here (presumably ``BaseImageProcessor``, imported above -- TODO
    confirm), the ``__init__``/``preprocess`` signatures declare several
    parameters that all share the name ``UpperCAmelCase`` (a SyntaxError in
    Python), and locals are bound to ``lowercase`` but read under other names.
    Code is left byte-identical; only documentation was added.
    """

    # NOTE(review): presumably ``model_input_names`` -- TODO confirm.
    UpperCamelCase_ = ['''pixel_values''']
    def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
        """Store preprocessing defaults: resize flag/size/resample, rescale,
        normalize (mean/std default to the ImageNet standard values), and the
        OCR options (apply_ocr, ocr_lang, tesseract_config)."""
        super().__init__(**UpperCAmelCase )
        lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
        lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
        lowercase : Optional[Any] =do_resize
        lowercase : List[Any] =size
        lowercase : List[str] =resample
        lowercase : Dict =do_rescale
        lowercase : str =rescale_value
        lowercase : Optional[int] =do_normalize
        lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
        lowercase : List[Any] =apply_ocr
        lowercase : Union[str, Any] =ocr_lang
        lowercase : str =tesseract_config
    def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
        """Resize a single image to ``size`` (requires 'height' and 'width' keys)."""
        lowercase : Tuple =get_size_dict(UpperCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
        lowercase : Optional[Any] =(size['''height'''], size['''width'''])
        return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
        """Rescale pixel values by a scalar factor (e.g. 1/255)."""
        return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
        """Full preprocessing pipeline: validate inputs, optionally OCR each
        image (attaching ``words``/``boxes`` to the output), then resize,
        rescale, normalize and pack into a ``BatchFeature``."""
        # Fall back to the instance-level defaults stored in __init__.
        lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
        lowercase : Tuple =size if size is not None else self.size
        lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
        lowercase : List[str] =resample if resample is not None else self.resample
        lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
        lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
        lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
        lowercase : Optional[int] =image_std if image_std is not None else self.image_std
        lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
        lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
        lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
        lowercase : str =make_list_of_images(UpperCAmelCase )
        if not valid_images(UpperCAmelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
        # All transformations expect numpy arrays.
        lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            lowercase : int =[]
            lowercase : Tuple =[]
            for image in images:
                lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
                words_batch.append(UpperCAmelCase )
                boxes_batch.append(UpperCAmelCase )
        if do_resize:
            lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
        if do_rescale:
            lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
        if do_normalize:
            lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
        lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
        lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
        if apply_ocr:
            lowercase : int =words_batch
            lowercase : List[str] =boxes_batch
        return data
| 8 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: sample latents, denoise with a
    UNet under a DDIM scheduler, then decode with a VQ-VAE.

    NOTE(review): reconstructed from a mangled original whose ``__call__``
    declared several parameters all named ``UpperCAmelCase`` (duplicate
    argument names -> SyntaxError), whose base class was the undefined alias
    ``_UpperCAmelCase`` (``DiffusionPipeline`` is imported above), and whose
    locals were bound to ``lowercase`` but read under other names.  Parameter
    names and defaults are grounded in the surviving annotations and body.
    """

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate *batch_size* images; returns PIL images by default."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 718 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
    """Helper that builds a tiny DistilBERT config plus random inputs and
    exercises each TF head (base model, MLM, QA, sequence/token
    classification, multiple choice).

    NOTE(review): machine-mangled -- every local below is bound to a variable
    literally named ``lowercase`` (later reads such as ``self.batch_size`` or
    ``config`` therefore cannot resolve), and all check methods share the name
    ``A__`` so each definition shadows the previous one.  Code is left
    byte-identical; only documentation was added.
    """
    def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
        """Record the parent test case and the miniature model hyper-parameters."""
        lowercase : int =parent
        lowercase : Any =13
        lowercase : Any =7
        lowercase : Optional[int] =True
        lowercase : Optional[int] =True
        lowercase : Tuple =False
        lowercase : Optional[Any] =True
        lowercase : Dict =99
        lowercase : Union[str, Any] =32
        lowercase : Union[str, Any] =2
        lowercase : Union[str, Any] =4
        lowercase : List[str] =37
        lowercase : str ='''gelu'''
        lowercase : Dict =0.1
        lowercase : List[Any] =0.1
        lowercase : List[str] =512
        lowercase : Optional[int] =16
        lowercase : Optional[Any] =2
        lowercase : List[str] =0.0_2
        lowercase : Any =3
        lowercase : Optional[Any] =4
        lowercase : int =None
    def A__ ( self : List[str] ) -> Dict:
        """Create a small DistilBertConfig and random input ids/mask/labels."""
        lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : Any =None
        if self.use_input_mask:
            lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : Union[str, Any] =None
        lowercase : Any =None
        lowercase : str =None
        if self.use_labels:
            lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
        lowercase : Dict =DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        """Check the base model's last_hidden_state shape (dict and list inputs)."""
        lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
        lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : List[str] =model(UpperCAmelCase )
        lowercase : str =[input_ids, input_mask]
        lowercase : Tuple =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
        """Check the masked-LM head's logits shape."""
        lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
        lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : Union[str, Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
        """Check the QA head's start/end logits shapes."""
        lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
        lowercase : int ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
        """Check the sequence-classification head's logits shape."""
        lowercase : Dict =self.num_labels
        lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
        lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : Union[str, Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
        """Check the multiple-choice head: inputs are tiled per choice."""
        lowercase : List[Any] =self.num_choices
        lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
        lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : Tuple ={
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        lowercase : Tuple =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
        """Check the token-classification head's logits shape."""
        lowercase : Dict =self.num_labels
        lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
        lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : str =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def A__ ( self : List[str] ) -> Dict:
        """Split prepared config/inputs into (config, inputs_dict) for common tests."""
        lowercase : int =self.prepare_config_and_inputs()
        ((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
        lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
    """Model-tester suite wiring the TFDistilBert heads into the shared
    ModelTester/Pipeline mixins (the mangled ``__A`` bases).

    NOTE(review): machine-mangled -- the four class attributes below are all
    named ``UpperCamelCase_`` (each assignment shadows the previous one, so
    only the last survives), all test methods share the name ``A__`` (only the
    last definition is visible to unittest), and locals are bound to
    ``lowercase`` but read under other names.  Code left byte-identical;
    only documentation added.
    """
    UpperCamelCase_ = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    UpperCamelCase_ = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    def A__ ( self : Dict ) -> str:
        """Instantiate the model tester and the config tester."""
        lowercase : str =TFDistilBertModelTester(self )
        lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
    def A__ ( self : Union[str, Any] ) -> Dict:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def A__ ( self : Optional[Any] ) -> Tuple:
        """Exercise the base model check."""
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
    def A__ ( self : Tuple ) -> Any:
        """Exercise the masked-LM head check."""
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
    def A__ ( self : Optional[int] ) -> List[Any]:
        """Exercise the question-answering head check."""
        lowercase : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
    def A__ ( self : Any ) -> str:
        """Exercise the sequence-classification head check."""
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
    def A__ ( self : Optional[Any] ) -> List[str]:
        """Exercise the multiple-choice head check."""
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
    def A__ ( self : str ) -> Union[str, Any]:
        """Exercise the token-classification head check."""
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
    @slow
    def A__ ( self : List[Any] ) -> Dict:
        """Load the first pretrained checkpoint from the hub (slow test)."""
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: run the pretrained DistilBERT base checkpoint and
    compare the first hidden states against recorded reference values.

    NOTE(review): the original bound results to variables literally named
    ``lowercase`` but then read ``output``/``expected_shape``/``expected_slice``
    (NameError at runtime); the names below are made consistent without
    changing any computation.
    """

    @slow
    def A__ ( self : List[str] ) -> List[Any]:
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )

        expected_slice = tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ] )
        # Loose tolerance: values were recorded on a specific TF build.
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 8 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowercase_(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    """Integrate ``y' = ode_func(x, y)`` with Heun's method (explicit
    trapezoidal rule / improved Euler).

    Returns the array of solution values ``y`` at the grid points
    ``x0, x0 + h, ..., x0 + n*h`` where ``n = ceil((x_end - x0) / step_size)``.

    NOTE(review): the original declared all five parameters as ``__A``
    (duplicate argument name -> SyntaxError).  The order below
    (ode_func, y0, x0, step_size, x_end) is inferred from how each value is
    used in the body -- TODO confirm against the original project.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: one explicit-Euler step.
        predictor = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, predictor))
        )
        x += step_size

    return y
if __name__ == "__main__":
    # Run any doctest examples embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Map of submodule name -> public symbols it provides. Entries are added
# conditionally below (instead of reassigning the whole dict, which would
# drop the earlier entries) and the result is handed to _LazyModule so the
# heavy optional dependencies are only imported on first attribute access.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors _import_structure.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attributes are resolved on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class lowercase__(TestCase):
    """Unit tests for ``TypedSequence``: conversion via ``pa.array`` with no type,
    an explicit ``type``, a best-effort ``try_type``, and extension array types.

    Fixes vs. previous version: the base class must be ``unittest.TestCase``
    (the old base name was undefined), every method needs a unique ``test_*``
    name (they previously shadowed one another), and the expected pyarrow
    dtypes (``int64``/``int32``) and numpy dtype (``uint8``) are spelled out.
    """

    def test_no_type(self):
        """Without any type, a list of ints defaults to int64."""
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        """Passing ``type=`` to pa.array alongside TypedSequence is forbidden."""
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        """``try_type`` and ``type`` are mutually exclusive."""
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value('''bool'''), type=Value('''int64''')))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value('''int32''')))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(['''foo''', '''bar'''], type=Value('''int64''')))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value('''int32''')))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        # An incompatible try_type falls back to the inferred type instead of raising.
        arr = pa.array(TypedSequence(['''foo''', '''bar'''], try_type=Value('''int64''')))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=ArrayaD((1, 3), '''int64''')))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), '''int64'''))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(['''foo''', '''bar'''], type=ArrayaD((1, 3), '''int64''')))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=ArrayaD((1, 3), '''int64''')))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), '''int64'''))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(['''foo''', '''bar'''], try_type=ArrayaD((1, 3), '''int64''')))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        """cast_to_python_objects must be invoked with optimize_list_casting=False."""
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            '''datasets.arrow_writer.cast_to_python_objects''', side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{'''path''': None, '''bytes''': b'''image_bytes'''}, pil_image], type=Image()))
            # call_args_list entries are (args, kwargs) pairs — unpack both.
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn('''optimize_list_casting''', kwargs)
            self.assertFalse(kwargs['''optimize_list_casting'''])
def _check_output(output, expected_num_chunks):
    """Read back a written Arrow stream and assert its chunking and contents.

    ``output`` is either an in-memory ``pa.Buffer`` or a file path; the data is
    expected to be the canonical two-row test table.
    """
    # Buffers are read in memory; paths are memory-mapped.
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10])
@pytest.mark.parametrize(
    '''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}]
)
def test_write(fields, writer_batch_size):
    """Row-by-row writes produce the expected schema and chunking."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({'''col_1''': '''foo''', '''col_2''': 1})
        writer.write({'''col_1''': '''bar''', '''col_2''': 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        # When no schema is given, the writer must infer these types.
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    """Features passed to the writer define (and survive in) the output schema."""
    output = pa.BufferOutputStream()
    features = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({'''labels''': 0})
        writer.write({'''labels''': 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    # Round trip: the features must be reconstructible from the written schema.
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10])
def test_key_datatype(writer_batch_size):
    """Non-hashable keys (e.g. lists) are rejected with InvalidKeyError."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt='''split_name''',
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''', [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    """Writing two rows with the same key raises DuplicatedKeysError."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt='''split_name''',
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=10)
            writer.write({'''col_1''': '''bar''', '''col_2''': 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''', [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    """Distinct keys pass duplicate checking and the data round-trips."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt='''split_name''',
        check_duplicates=True,
    ) as writer:
        writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=1)
        writer.write({'''col_1''': '''bar''', '''col_2''': 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10])
@pytest.mark.parametrize(
    '''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    """write_batch handles non-empty and empty column batches."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]})
        # An empty batch must be a no-op rather than an error.
        writer.write_batch({'''col_1''': [], '''col_2''': []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10])
@pytest.mark.parametrize(
    '''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    """write_table accepts a whole pa.Table at once."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 10])
@pytest.mark.parametrize(
    '''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    """write_row accepts single-row pa.Table objects."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]}))
        writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    """The writer also works against a real file path (not just a stream)."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, '''test.arrow''')
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    """Recursively unwrap pyarrow list types down to the innermost value dtype."""
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    """Replace the first primitive element of a (possibly nested) list in place.

    Descends into leading sublists until a non-list element is found, then
    overwrites it with ``value``. Assumes ``lst`` is non-empty at every level.
    """
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''', [(None, pa.int64()), (Value('''int32'''), pa.int32())])
@pytest.mark.parametrize('''sequence''', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    """optimized_int_type narrows the integer dtype at any nesting depth."""
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    '''col, expected_dtype''', [
        ('''attention_mask''', pa.int8()),
        ('''special_tokens_mask''', pa.int8()),
        ('''token_type_ids''', pa.int8()),
        ('''input_ids''', pa.int32()),
        ('''other''', pa.int64()),
    ], )
@pytest.mark.parametrize('''sequence''', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    """Well-known column names get a narrowed dtype, with int64 fallback on overflow."""
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize('''raise_exception''', [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    """The writer's underlying stream is closed whether or not the body raises."""
    path = str(tmp_path / '''dataset-train.arrow''')
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    """An fsspec URL plus storage_options routes writes through that filesystem."""
    path = """mock://dataset-train.arrow"""
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({'''col_1''': '''foo''', '''col_2''': 1})
        writer.write({'''col_1''': '''bar''', '''col_2''': 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    """ParquetWriter output is readable back with pyarrow.parquet."""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({'''col_1''': '''foo''', '''col_2''': 1})
        writer.write({'''col_1''': '''bar''', '''col_2''': 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''', [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    """embed_local_files controls whether image bytes or only the path are stored."""
    import PIL.Image

    image_path = str(tmp_path / '''test_image_rgb.jpg''')
    # NOTE(review): the file is saved as PNG despite the .jpg name; only the
    # raw bytes matter for this test.
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format='''png''')
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({'''image''': Image()}), embed_local_files=embed_local_files) as writer:
        writer.write({'''image''': image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out['''image'''][0]['''path'''], str)
        with open(image_path, '''rb''') as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """A non-nullable inferred schema is normalized to nullable fields."""
    non_nullable_schema = pa.schema([pa.field('''col_1''', pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field('''col_1''', pa.string())])
| 720 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def compare_versions(library_or_version, operation, requirement_version):
    """Compare a library's version (or a parsed Version) against a requirement.

    Args:
        library_or_version: an installed library name (its version is looked up
            via importlib.metadata) or an already-parsed ``Version``.
        operation: a key of ``STR_OPERATION_TO_FUNC`` (e.g. ``">="``).
        requirement_version: the version string to compare against.

    Raises:
        ValueError: if ``operation`` is not a supported comparison.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        # A bare name means "the installed version of this library".
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
def lowercase_(operation: str, version: str):
    """Compare the installed torch version (parsed at import time above)
    against ``version`` using ``operation`` (e.g. ``">="``)."""
    return compare_versions(SCREAMING_SNAKE_CASE, operation, version)
| 8 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    """PyTorch-Lightning module that fine-tunes a transformer on a GLUE task."""

    # Passed to BaseTransformer; selects the sequence-classification head.
    mode = '''sequence-classification'''

    def __init__(self, hparams):
        # Accept a plain dict (e.g. restored from a checkpoint) as well as a Namespace.
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        """Run one optimization step; logs loss and current learning rate."""
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # Only some architectures consume token_type_ids.
            inputs['''token_type_ids'''] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['''scheduler''']
        tensorboard_logs = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Tokenize the GLUE train/dev sets once and cache the features on disk."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''', cached_features_file)
            else:
                logger.info('''Creating features from dataset file at %s''', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == '''dev'''
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info('''Saving features into cached file %s''', cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Build a DataLoader from the cached features ("test" maps onto "dev")."""
        mode = '''dev''' if mode == '''test''' else mode
        cached_features_file = self._feature_file(mode)
        logger.info('''Loading features from cached file %s''', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )

    def validation_step(self, batch, batch_idx):
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['''token_type_ids'''] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        """Aggregate per-batch eval outputs into GLUE metrics plus a log dict."""
        val_loss_mean = torch.stack([x['''val_loss'''] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['''pred'''] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x['''target'''] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret['''log'''] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Extend *parser* with GLUE-specific CLI options; returns the parser."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '''--max_seq_length''', default=128, type=int, help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ), )
        parser.add_argument(
            '''--task''', default='''''', type=str, required=True, help='''The GLUE task to run''', )
        parser.add_argument(
            '''--gpus''', default=0, type=int, help='''The number of GPUs allocated for this, it is by default 0 meaning none''', )
        parser.add_argument(
            '''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
        return parser
def main():
    """CLI entry point: parse args, train a GLUETransformer, optionally predict."""
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            '''./results''', F'{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}', )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        # Pick the newest checkpoint produced during training.
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
# Standard script entry point: run training (and optional prediction).
if __name__ == "__main__":
    main()
| 721 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup
# Base search URL; the location is appended as the `l=` query parameter.
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Scrape Indeed and yield ``(job_title, company_name)`` pairs for *location*."""
    soup = BeautifulSoup(requests.get(url + location).content, '''html.parser''')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''', attrs={'''data-tn-component''': '''organicJob'''}):
        job_title = job.find('''a''', attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
        company_name = job.find('''span''', {'''class''': '''company'''}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
    # Demo: scrape Bangalore listings and print one numbered line per job.
    for i, job in enumerate(fetch_jobs('Bangalore'), 1):
        print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Input must be a string of 8 numbers plus letter'
SCREAMING_SNAKE_CASE = 'TRWAGMYFPDXBNJZSQVHLCKE'
def lowercase_ ( __A : List[Any] ) -> bool:
"""simple docstring"""
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase : Optional[int] =F'Expected string as input, found {type(__SCREAMING_SNAKE_CASE ).__name__}'
raise TypeError(__SCREAMING_SNAKE_CASE )
lowercase : Optional[int] =spanish_id.replace('''-''' , '''''' ).upper()
if len(__SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(__SCREAMING_SNAKE_CASE )
try:
lowercase : Union[str, Any] =int(spanish_id_clean[0:8] )
lowercase : Dict =spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(__SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = r'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
@add_start_docstrings(UpperCAmelCase )
def __call__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ) -> bool:
'''simple docstring'''
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : str = None ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =max_length
lowercase : int =max_position_embeddings
@add_start_docstrings(UpperCAmelCase )
def __call__( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , **UpperCAmelCase : Optional[int] ) -> bool:
'''simple docstring'''
lowercase : Tuple =input_ids.shape[-1]
lowercase : Any =cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class UpperCAmelCase_ ( __A ):
    """Deprecated criterion: stop ``max_new_tokens`` tokens after ``start_length``.

    Equivalent to ``MaxLengthCriteria(max_length=start_length + max_new_tokens)``.
    """

    def __init__( self , start_length , max_new_tokens ) -> None:
        # NOTE(review): the warning category was garbled by obfuscation;
        # restored as FutureWarning (the standard category for deprecations) —
        # verify against the upstream definition.
        warnings.warn(
            '''The class `MaxNewTokensCriteria` is deprecated. '''
            f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
            '''with `max_length = start_length + max_new_tokens` instead.''' , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(SCREAMING_SNAKE_CASE )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        """Return True once the sequence length reaches the derived max length."""
        return input_ids.shape[-1] >= self.max_length
class UpperCAmelCase_ ( __A ):
    """Stop generation when wall-clock time since ``initial_timestamp`` exceeds ``max_time`` seconds."""

    def __init__( self , max_time , initial_timestamp = None ) -> None:
        self.max_time = max_time
        # Default the reference point to "now" so the time budget starts at construction.
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(SCREAMING_SNAKE_CASE )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        """Return True once the elapsed time exceeds the allowed budget."""
        return time.time() - self.initial_timestamp > self.max_time
class UpperCAmelCase_ ( __A ):
    """List-like container of stopping criteria; generation stops when any member fires."""
    # NOTE(review): body left byte-identical.  As written, the duplicated
    # `UpperCAmelCase` parameter names in `__call__` are a SyntaxError, and the
    # `isinstance` targets in the max-length property were lost in obfuscation
    # (originally two distinct length-based criterion classes) — the intended
    # referents cannot be recovered from this chunk alone.
    @add_start_docstrings(UpperCAmelCase )
    def __call__( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , **UpperCAmelCase : Tuple ) -> bool:
        """Return True if any contained criterion says to stop."""
        return any(criteria(UpperCAmelCase , UpperCAmelCase ) for criteria in self )
    @property
    def A__ ( self : List[str] ) -> Optional[int]:
        """Return the `max_length` of the first length-based criterion, if any."""
        for stopping_criterium in self:
            if isinstance(UpperCAmelCase , UpperCAmelCase ):
                return stopping_criterium.max_length
            elif isinstance(UpperCAmelCase , UpperCAmelCase ):
                return stopping_criterium.max_length
        return None
def lowercase_ ( stopping_criteria : StoppingCriteriaList , max_length : int ) -> StoppingCriteriaList:
    """Return a deep copy of ``stopping_criteria`` consistent with ``max_length``.

    Warns if the list already carries a different max length; appends a
    ``MaxLengthCriteria`` when it carries none.
    """
    # NOTE(review): the original declared two duplicate `__A` parameters (a
    # SyntaxError); names restored from the body.  `MaxLengthCriteria` is not
    # defined under that name in this chunk (classes were renamed) — verify.
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        # UserWarning restored: warnings.warn's second positional arg is the category.
        warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
| 701 |
"""Lazy-import scaffold for the MEGA model (heavy torch modules load on first use)."""
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Import structure consumed by _LazyModule: submodule name -> public symbols.
SCREAMING_SNAKE_CASE = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): the obfuscated source re-bound the whole dict to this list
    # (losing the configuration entry) — restored as a keyed submodule entry,
    # matching the `from .modeling_mega import ...` branch below.
    SCREAMING_SNAKE_CASE['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy built from the structure above.
    # NOTE(review): the original bound the proxy to `SCREAMING_SNAKE_CASE` and
    # passed the undefined `_import_structure`; restored to the canonical
    # `sys.modules[__name__]` pattern (why else import `sys` here?).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
def lowercase_ ( rows : int , cols : int , mat : list[list[int]] ) -> int:
    """Largest all-ones square in ``mat`` (side length), by plain recursion.

    NOTE(review): the obfuscated signature declared three duplicate ``__A``
    parameters (a SyntaxError); names restored from the body reads.
    """
    def update_area_of_max_square(row : int , col : int ) -> int:
        # BASE CASE: off the grid contributes nothing.
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , row if False else col )
        if mat[row][col]:
            # Square ending here extends the smallest of the three neighbours.
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    # Single-element list so the nested helper can update the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def lowercase_ ( rows : int , cols : int , mat : list[list[int]] ) -> int:
    """Largest all-ones square in ``mat`` (side length), memoised top-down DP.

    NOTE(review): duplicate ``__A`` parameter names (a SyntaxError) and the
    undefined ``snake_case_`` call arguments restored from the body reads.
    """
    def update_area_of_max_square_using_dp_array(
        row : int , col : int , dp_array : list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        # -1 marks "not computed yet"; anything else is a cached answer.
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def lowercase_ ( rows : int , cols : int , mat : list[list[int]] ) -> int:
    """Largest all-ones square in ``mat`` (side length), bottom-up DP table.

    NOTE(review): duplicate ``__A`` parameters (a SyntaxError) and the lost
    accumulator/table writes restored — the obfuscated body assigned every
    result to a throwaway ``lowercase`` variable.
    """
    # One extra row/column of zeros avoids bounds checks at the grid edge.
    dp_array = [[0] * (cols + 1 ) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            down = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , down )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def lowercase_ ( rows : int , cols : int , mat : list[list[int]] ) -> int:
    """Largest all-ones square in ``mat`` (side length), bottom-up DP with O(cols) space.

    Only the row below (``next_row``) and the row being built (``current_row``)
    are kept.  NOTE(review): duplicate ``__A`` parameters (a SyntaxError) and
    the lost writes restored; a fresh ``current_row`` is allocated per row so
    the rolling update never aliases the row below.
    """
    next_row = [0] * (cols + 1 )
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        current_row = [0] * (cols + 1 )
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            down = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , down )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        # Finished row becomes the "row below" for the next iteration.
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): every helper above is defined under the placeholder name
    # `lowercase_` (the original `largest_square_area_in_matrix_bottom_up` is
    # undefined); call the most recent binding, the space-optimised variant.
    print(lowercase_(2, 2, [[1, 1], [1, 1]]))
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
# NOTE(review): every constant below is bound to the same obfuscated name
# `SCREAMING_SNAKE_CASE`, so only the final assignment survives at runtime —
# originally these were distinct module constants (logger, vocab-file names,
# pretrained URL map, positional-embedding sizes, init configuration); verify
# the intended names before relying on any of them.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Checkpoint file names the fast tokenizer expects.
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# Download URLs for the published allegro/herbert-base-cased checkpoint.
SCREAMING_SNAKE_CASE = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}
# Maximum model input size (positions) per checkpoint.
SCREAMING_SNAKE_CASE = {'allegro/herbert-base-cased': 514}
# No extra init configuration for this checkpoint.
SCREAMING_SNAKE_CASE = {}
class UpperCAmelCase_ ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) tokenizer for HerBERT with BERT-style special tokens.

    NOTE(review): the obfuscated source collapsed every class attribute and
    method name to duplicate placeholders (duplicate parameter names are a
    SyntaxError as written).  Attribute and method names are restored from the
    ``PreTrainedTokenizerFast`` contract, which the base class (imported at the
    top of this file) reads by name — verify against upstream.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )

    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        """``<s> A </s>`` for one sequence, ``<s> A </s> B </s>`` for a pair."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep

    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """1 marks a special token, 0 a sequence token, mirroring the layout above."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        """Segment ids: 0 for the first sequence (with specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """Persist the underlying tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowercase_ ( ) -> tuple[list[int], int]:
    """Build a random benchmark dataset: 10 ints in [-1000, 1000] plus a target in [-5000, 5000].

    NOTE(review): the obfuscated body bound both values to a throwaway
    ``lowercase`` variable and returned the undefined ``(arr, r)``; restored.
    """
    arr = [randint(-1_0_0_0 , 1_0_0_0 ) for _ in range(1_0 )]
    r = randint(-5_0_0_0 , 5_0_0_0 )
    return (arr, r)
# NOTE(review): `make_dataset` is undefined here — the dataset builder directly
# above was renamed to `lowercase_` by obfuscation, so call it under that name.
SCREAMING_SNAKE_CASE = lowercase_()
def lowercase_ ( arr : list[int] , target : int ) -> tuple[int, int, int]:
    """Brute-force 3-sum: first triplet from ``arr`` summing to ``target``, sorted.

    Returns ``(0, 0, 0)`` when no triplet matches.  NOTE(review): the original
    declared two duplicate ``__A`` parameters (a SyntaxError) and referenced the
    undefined ``_lowerCamelCase``; names restored from the semantics.
    """
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def lowercase_ ( arr : list[int] , target : int ) -> tuple[int, int, int]:
    """Two-pointer 3-sum on a sorted copy-in-place of ``arr``.

    Sorts ``arr`` in place, then for each anchor walks ``left``/``right``
    inwards.  Returns ``(0, 0, 0)`` when no triplet sums to ``target``.
    NOTE(review): duplicate ``__A`` parameters (a SyntaxError) and the lost
    ``left, right`` tuple unpack restored from the body.
    """
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def lowercase_ ( ) -> tuple[float, float]:
    """Benchmark the naive and two-pointer solvers; return their best-of-5 times.

    NOTE(review): the setup snippet imports `dataset`, `triplet_sum1` and
    `triplet_sum2` from `__main__`, but those names do not exist under this
    chunk's obfuscated bindings — verify before running.
    """
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code_a = "\ntriplet_sum1(*dataset)\n"
    test_code_b = "\ntriplet_sum2(*dataset)\n"
    times_a = repeat(setup=setup_code , stmt=test_code_a , repeat=5 , number=1_0_0_0_0 )
    times_b = repeat(setup=setup_code , stmt=test_code_b , repeat=5 , number=1_0_0_0_0 )
    return (min(times_a ), min(times_b ))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): `solution_times`/`times` were undefined — the timing helper
    # above is the most recent `lowercase_` binding; call and reuse it directly.
    SCREAMING_SNAKE_CASE = lowercase_()
    print(f"""The time for naive implementation is {SCREAMING_SNAKE_CASE[0]}.""")
    print(f"""The time for optimized implementation is {SCREAMING_SNAKE_CASE[1]}.""")
| 703 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( TestCasePlus ):
    """Offline-mode smoke tests: run tiny scripts in subprocesses with socket access mocked out.

    NOTE(review): restored from heavily obfuscated source — the base class was
    the undefined ``__A`` (``TestCasePlus`` is imported above and provides
    ``get_env``), every local was bound to a throwaway ``lowercase`` name, and
    all five methods shared one shadowing placeholder name; descriptive
    ``test_*`` names reinstated so unittest discovers each of them — verify
    against upstream.
    """

    @require_torch
    def test_offline_mode( self ):
        # Script fragments executed in a child interpreter via `python -c`.
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        mname = '''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='''fill-mask''' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )

    @require_torch
    def test_offline_mode_no_internet( self ):
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        mname = '''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='''fill-mask''' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed: cached files are found even though sockets fail
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )

    @require_torch
    def test_offline_mode_sharded_checkpoint( self ):
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )

    @require_torch
    def test_offline_mode_pipeline_exception( self ):
        load = '''
from transformers import pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
        env = self.get_env()
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        # Inferring the task needs the Hub, so this must fail offline.
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )

    @require_torch
    def test_offline_mode_trust_remote_code( self ):
        load = '''
from transformers import AutoModel
'''
        run = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
| 8 | 0 |
"""Lazy-import scaffold for ALBERT (slow/fast tokenizers, torch, TF and Flax models)."""
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Import structure consumed by _LazyModule: submodule name -> public symbols.
# NOTE(review): the obfuscated source re-bound this whole dict at every
# optional-dependency branch (losing all previous entries); restored as keyed
# submodule entries matching the TYPE_CHECKING imports below.
SCREAMING_SNAKE_CASE = {
    'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['tokenization_albert'] = ['AlbertTokenizer']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['modeling_albert'] = [
        'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AlbertForMaskedLM',
        'AlbertForMultipleChoice',
        'AlbertForPreTraining',
        'AlbertForQuestionAnswering',
        'AlbertForSequenceClassification',
        'AlbertForTokenClassification',
        'AlbertModel',
        'AlbertPreTrainedModel',
        'load_tf_weights_in_albert',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['modeling_tf_albert'] = [
        'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFAlbertForMaskedLM',
        'TFAlbertForMultipleChoice',
        'TFAlbertForPreTraining',
        'TFAlbertForQuestionAnswering',
        'TFAlbertForSequenceClassification',
        'TFAlbertForTokenClassification',
        'TFAlbertMainLayer',
        'TFAlbertModel',
        'TFAlbertPreTrainedModel',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['modeling_flax_albert'] = [
        'FlaxAlbertForMaskedLM',
        'FlaxAlbertForMultipleChoice',
        'FlaxAlbertForPreTraining',
        'FlaxAlbertForQuestionAnswering',
        'FlaxAlbertForSequenceClassification',
        'FlaxAlbertForTokenClassification',
        'FlaxAlbertModel',
        'FlaxAlbertPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy built from the structure above.
    # NOTE(review): restored `sys.modules[__name__]` target and the structure
    # argument — the source assigned to a constant and passed the undefined
    # `_import_structure`.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 704 |
'''simple docstring'''
# Module author tag (originally bound to ``__author__``; name garbled by obfuscation).
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowercase_ ( equation : str ) -> int:
    """Evaluate a fully parenthesised infix expression via Dijkstra's two-stack algorithm.

    Operands must be single digits; each digit character is pushed as an int.
    NOTE(review): the obfuscated body read ``equation``/``opr`` without binding
    them and applied each operator to the input string twice; restored.
    """
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack : Stack[int] = Stack()
    operator_stack : Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: operands go on the operand stack.
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2: operators go on the operator stack.
            operator_stack.push(i )
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands, apply, push result.
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            # Second-popped operand is the left-hand side (matters for '-' and '/').
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5: the final result sits on top of the operand stack.
    return operand_stack.peek()
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    # NOTE(review): `equation`/`dijkstras_two_stack_algorithm` were undefined —
    # the evaluator above is named `lowercase_` and the constant holds the input.
    print(f"""{SCREAMING_SNAKE_CASE} = {lowercase_(SCREAMING_SNAKE_CASE)}""")
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class UpperCAmelCase_ :
    """A binary-tree node: integer payload plus optional left/right children."""

    # NOTE(review): fields restored — the obfuscated source collapsed all three
    # to duplicate `UpperCamelCase_` placeholders (so the dataclass had no
    # fields); the traversal helpers below read `.data`, `.left`, `.right`.
    data: int
    left: UpperCAmelCase_ | None = None
    right: UpperCAmelCase_ | None = None
def lowercase_ ( ) -> Node:
    """Build the fixed demo tree 1(2(4, 5), 3).

    NOTE(review): the obfuscated body created five unlinked nodes and returned
    the undefined ``tree``; linkage restored.  ``Node`` is not defined under
    that name in this chunk (the dataclass above was renamed) — the annotation
    stays lazy thanks to ``from __future__ import annotations``; verify the
    intended class name before running.
    """
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def lowercase_ ( root : Node | None ) -> list[int]:
    """Pre-order traversal (root, left, right); empty list for an empty tree.

    NOTE(review): recursion restored to call this function itself — the source
    recursed via the undefined name ``preorder``.
    """
    return [root.data, *lowercase_(root.left ), *lowercase_(root.right )] if root else []
def lowercase_ ( root : Node | None ) -> list[int]:
    """Post-order traversal (left, right, root); empty list for an empty tree.

    NOTE(review): recursion restored to call this function itself — the source
    recursed via the undefined name ``postorder``.
    """
    return lowercase_(root.left ) + lowercase_(root.right ) + [root.data] if root else []
def lowercase_ ( root : Node | None ) -> list[int]:
    """In-order traversal (left, root, right); empty list for an empty tree.

    NOTE(review): recursion restored to call this function itself — the source
    recursed via the undefined name ``inorder``.
    """
    return [*lowercase_(root.left ), root.data, *lowercase_(root.right )] if root else []
def lowercase_ ( root : Node | None ) -> int:
    """Tree height: number of nodes on the longest root-to-leaf path (0 for empty).

    NOTE(review): recursion restored to call this function itself — the source
    recursed via the undefined name ``height``.
    """
    return (max(lowercase_(root.left ) , lowercase_(root.right ) ) + 1) if root else 0
def lowercase_ ( root : Node | None ) -> list[int]:
    """Breadth-first (level-order) traversal of node payloads.

    NOTE(review): the obfuscated body bound the queue and the dequeued node to
    a throwaway ``lowercase`` name; bindings restored from the reads.
    """
    output : list[int] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def lowercase_ ( root : Node | None , level : int ) -> list[int]:
    """Payloads of all nodes at 1-based ``level``, collected left to right.

    NOTE(review): the obfuscated signature declared duplicate ``__A``
    parameters (a SyntaxError) and invoked the helper with the undefined
    ``snake_case__``; restored from the body.
    """
    output : list[int] = []

    def populate_output(node : Node | None , level : int ) -> None:
        if not node:
            return
        if level == 1:
            output.append(node.data )
        elif level > 1:
            populate_output(node.left , level - 1 )
            populate_output(node.right , level - 1 )

    populate_output(root , level )
    return output
def lowercase_ ( root : Node | None , level : int ) -> list[int]:
    """Payloads of all nodes at 1-based ``level``, collected right to left.

    Mirror of the left-to-right collector: the right subtree is visited first.
    NOTE(review): duplicate ``__A`` parameters (a SyntaxError) and the
    undefined ``snake_case__`` call arguments restored from the body.
    """
    output : list[int] = []

    def populate_output(node : Node | None , level : int ) -> None:
        if node is None:
            return
        if level == 1:
            output.append(node.data )
        elif level > 1:
            populate_output(node.right , level - 1 )
            populate_output(node.left , level - 1 )

    populate_output(root , level )
    return output
def lowercase_ ( root : Node | None ) -> list[list[int]]:
    """ZigZag level-order traversal: odd levels left→right, even levels right→left.

    NOTE(review): the obfuscated body depended on sibling helpers (``height``,
    the per-level collectors) that are unresolvable under this chunk's renamed
    bindings, so this is a self-contained breadth-first rewrite producing the
    same per-level lists with alternating direction.
    """
    if root is None:
        return []
    output : list[list[int]] = []
    flag = 0  # 0 -> left-to-right, 1 -> right-to-left
    level = [root]
    while level:
        values = [node.data for node in level]
        output.append(values if flag == 0 else values[::-1] )
        flag = 1 - flag
        level = [child for node in level for child in (node.left, node.right) if child]
    return output
def lowercase_ ( ) -> None: # Main function for testing.
    """Demo driver: build the sample tree and print every traversal."""
    # NOTE(review): left byte-identical below.  Every helper this driver calls
    # (make_tree, inorder, preorder, postorder, height, level_order,
    # get_nodes_from_left_to_right, zigzag) is defined above under the
    # placeholder name `lowercase_`, so these lookups fail at runtime as
    # written — confirm the intended names before running.
    lowercase : Optional[int] =make_tree()
    print(F'In-order Traversal: {inorder(snake_case__ )}' )
    print(F'Pre-order Traversal: {preorder(snake_case__ )}' )
    print(F'Post-order Traversal: {postorder(snake_case__ )}' , '''\n''' )
    print(F'Height of Tree: {height(snake_case__ )}' , '''\n''' )
    print('''Complete Level Order Traversal: ''' )
    print(level_order(snake_case__ ) , '''\n''' )
    print('''Level-wise order Traversal: ''' )
    for level in range(1 , height(snake_case__ ) + 1 ):
        print(F'Level {level}:' , get_nodes_from_left_to_right(snake_case__ , level=snake_case__ ) )
    print('''\nZigZag order Traversal: ''' )
    print(zigzag(snake_case__ ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `main` is not defined in this chunk — the demo driver above
    # is named `lowercase_`; confirm the intended entry point before running.
    main()
| 705 |
'''simple docstring'''
import re
def lowercase_ ( phone : str ) -> bool:
    """Validate an Indian mobile number (optional +91/0/91 prefix, starts 7/8/9, 10 digits).

    NOTE(review): the obfuscated body compiled the pattern into a throwaway
    variable and then searched the phone string against itself; restored so the
    compiled pattern is what gets matched.
    """
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pat , phone ):
        # A full-string match means the candidate is exactly one valid number.
        return match.string == phone
    return False
if __name__ == "__main__":
    # NOTE(review): the validator above is defined as `lowercase_`, not
    # `indian_phone_validator`; call it under its actual name.
    print(lowercase_('+918827897895'))
| 8 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowercase_ ( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
    """Launch ``function(*args)`` for training from a notebook on TPU, GPU(s) or CPU.

    NOTE(review): the obfuscated signature declared five duplicate ``__A``
    parameters (a SyntaxError) and referenced ``__snake_case`` throughout;
    names restored from the body reads and messages — verify against upstream.
    """
    in_colab = False
    in_kaggle = False
    # Detect the hosted-notebook environment from its telltale markers.
    if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        # NOTE(review): the source formatted `args.mixed_precision` here, but
        # `args` is the argument tuple — report the offending value instead.
        raise ValueError(
            f'Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.' )
    if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , None ) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state ) > 0:
            raise ValueError(
                '''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
                '''your training function. Restart your notebook and make sure no cells initializes an '''
                '''`Accelerator`.''' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='''TPU''' )
        print(f'Launching a training on {num_processes} TPU cores.' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('''Launching training on one GPU.''' )
        else:
            print('''Launching training on one CPU.''' )
        function(*args )
    else:
        if num_processes is None:
            raise ValueError(
                '''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state ) > 0:
                raise ValueError(
                    '''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
                    '''inside your training function. Restart your notebook and make sure no cells initializes an '''
                    '''`Accelerator`.''' )
            if torch.cuda.is_initialized():
                raise ValueError(
                    '''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
                    '''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
                    '''function.''' )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='''127.0.01''' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='''MULTI_GPU''' )
                print(f'Launching training on {num_processes} GPUs.' )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            '''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
                            '''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
                            '''Please review your imports and test them when running the `notebook_launcher()` to identify '''
                            '''which one is problematic.''' ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                # NOTE(review): the original bound '1' to a throwaway variable
                # here; presumably an MPS-related env toggle was intended —
                # restored as the fallback flag, verify against upstream.
                os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
                print('''Launching training on MPS.''' )
            elif torch.cuda.is_available():
                print('''Launching training on one GPU.''' )
            else:
                print('''Launching training on CPU.''' )
            function(*args )
def lowercase_ ( function , args=() , num_processes=2 ):
    """Debug launcher: run ``function(*args)`` across ``num_processes`` CPU processes.

    NOTE(review): duplicate ``__A`` parameters (a SyntaxError) and the
    ``__snake_case`` placeholders restored from the body; the ``debug`` flag of
    ``PrepareForLaunch`` was garbled and is restored as True — verify.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
| 8 | 0 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
# Module-level logger, namespaced to this file.
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def lowercase_ ( __A : int=2 , __B : int=3 , __C : int=1_6 , __D : int = 1_0 , __E : int = 2 ) -> tuple:
    """Build a ``(train, valid)`` pair of ``DataLoader``s over synthetic linear data.

    Each dataset holds ``batch_size * n_batches`` samples drawn from the noisy
    line ``y = a * x + b + 0.1 * noise`` — small and deterministic enough for
    the checkpointing tests in this file.

    Args:
        __A: slope ``a`` of the synthetic line.
        __B: intercept ``b`` of the synthetic line.
        __C: batch size used by both loaders.
        __D: number of training batches.
        __E: number of validation batches.

    Returns:
        ``(train_dataloader, valid_dataloader)``.
    """
    # BUGFIX: the previous signature repeated the parameter name `__A` five
    # times (a SyntaxError) and the body referenced names that were never
    # bound; parameters are now distinct and used consistently.

    def get_dataset(n_batches ):
        # Features are standard normal; targets follow the noisy line.
        x = torch.randn(__C * n_batches , 1 )
        return TensorDataset(x , __A * x + __B + 0.1 * torch.randn(__C * n_batches , 1 ) )

    train_dataset = get_dataset(__D )
    valid_dataset = get_dataset(__E )
    # Only the training split is shuffled, mirroring normal training practice.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=__C , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=__C , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def lowercase_ ( __A : str , __A : Optional[int] , __A : Union[str, Any] , __A : Union[str, Any] , __A : List[str] , __A : List[str]=None ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] =[]
for epoch in range(UpperCAmelCase__ ):
# Train quickly
model.train()
for batch in dataloader:
lowercase : int =batch
lowercase : Tuple =model(UpperCAmelCase__ )
lowercase : Optional[Any] =torch.nn.functional.mse_loss(UpperCAmelCase__ , UpperCAmelCase__ )
accelerator.backward(UpperCAmelCase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase : List[Any] =nn.Parameter(torch.randn(1 ) )
lowercase : Tuple =nn.Parameter(torch.randn(1 ) )
def A__ ( self : Optional[int] , UpperCAmelCase : Dict ) -> List[Any]:
'''simple docstring'''
return x * self.a + self.b
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit tests for ``Accelerator`` state checkpointing (``save_state``/``load_state``)."""

    def A__ ( self : Dict ) -> Tuple:
        '''With ``total_limit=1`` only the most recent checkpoint is kept on disk.'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            lowercase : int =DummyModel()
            lowercase : Optional[Any] =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase : Any =dummy_dataloaders()
            lowercase : int =ProjectConfiguration(total_limit=1 , project_dir=lowercase_ , automatic_checkpoint_naming=lowercase_ )
            # Train baseline
            lowercase : List[Any] =Accelerator(project_config=lowercase_ )
            lowercase : List[str] =accelerator.prepare(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            # Saving twice with total_limit=1 must leave exactly one checkpoint.
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )

    def A__ ( self : str ) -> str:
        '''Training can be resumed from an explicitly named checkpoint folder.'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            lowercase : List[str] =DummyModel()
            lowercase : Any =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase : Dict =dummy_dataloaders()
            # Train baseline
            lowercase : str =Accelerator()
            lowercase : Any =accelerator.prepare(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            # Save initial
            lowercase : Any =os.path.join(lowercase_ , '''initial''' )
            accelerator.save_state(lowercase_ )
            (lowercase) : Tuple =model.a.item(), model.b.item()
            lowercase : Tuple =optimizer.state_dict()
            lowercase : int =train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            (lowercase) : List[Any] =model.a.item(), model.b.item()
            lowercase : Dict =optimizer.state_dict()
            # Train partially
            set_seed(42 )
            lowercase : List[Any] =DummyModel()
            lowercase : int =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase : List[str] =dummy_dataloaders()
            lowercase : Optional[int] =Accelerator()
            lowercase : Optional[Any] =accelerator.prepare(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            # Restoring the "initial" checkpoint must reproduce the pre-training state.
            accelerator.load_state(lowercase_ )
            (lowercase) : List[Any] =model.a.item(), model.b.item()
            lowercase : Optional[int] =optimizer.state_dict()
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )
            lowercase : Optional[int] =train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            # Save everything
            lowercase : str =os.path.join(lowercase_ , '''checkpoint''' )
            accelerator.save_state(lowercase_ )
            # Load everything back in and make sure all states work
            accelerator.load_state(lowercase_ )
            test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            (lowercase) : Tuple =model.a.item(), model.b.item()
            lowercase : Tuple =optimizer.state_dict()
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )

    def A__ ( self : int ) -> List[str]:
        '''Training can be resumed from automatically named checkpoints (checkpoint_N).'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            lowercase : Any =DummyModel()
            lowercase : int =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase : Dict =dummy_dataloaders()
            lowercase : Tuple =ProjectConfiguration(automatic_checkpoint_naming=lowercase_ )
            # Train baseline
            lowercase : List[Any] =Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
            lowercase : str =accelerator.prepare(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            # Save initial
            accelerator.save_state()
            (lowercase) : str =model.a.item(), model.b.item()
            lowercase : str =optimizer.state_dict()
            lowercase : Optional[Any] =train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            (lowercase) : str =model.a.item(), model.b.item()
            lowercase : List[Any] =optimizer.state_dict()
            # Train partially
            set_seed(42 )
            lowercase : Union[str, Any] =DummyModel()
            lowercase : Union[str, Any] =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase : Union[str, Any] =dummy_dataloaders()
            lowercase : Dict =ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowercase_ )
            lowercase : int =Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
            lowercase : Optional[int] =accelerator.prepare(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            # checkpoint_0 is the state saved before any training happened.
            accelerator.load_state(os.path.join(lowercase_ , '''checkpoints''' , '''checkpoint_0''' ) )
            (lowercase) : List[Any] =model.a.item(), model.b.item()
            lowercase : List[str] =optimizer.state_dict()
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )
            lowercase : Optional[int] =train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(lowercase_ , '''checkpoints''' , '''checkpoint_1''' ) )
            test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            (lowercase) : Tuple =model.a.item(), model.b.item()
            lowercase : List[str] =optimizer.state_dict()
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )
            self.assertEqual(lowercase_ , lowercase_ )

    def A__ ( self : Any ) -> int:
        '''``register_for_checkpointing`` rejects objects with no ``state_dict``.'''
        lowercase : List[str] =torch.tensor([1, 2, 3] )
        lowercase : Any =torch.tensor([2, 3, 4] )
        lowercase : Tuple =DummyModel()
        lowercase : str =torch.optim.Adam(net.parameters() )
        lowercase : Optional[Any] =Accelerator()
        with self.assertRaises(lowercase_ ) as ve:
            accelerator.register_for_checkpointing(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        lowercase : Optional[int] =str(ve.exception )
        # Only the raw tensors (indices 0 and 1) are invalid; the model and the
        # optimizer (indices 2 and 3) do implement ``state_dict`` and must not
        # be mentioned in the error message.
        self.assertTrue('''Item at index 0''' in message )
        self.assertTrue('''Item at index 1''' in message )
        self.assertFalse('''Item at index 2''' in message )
        self.assertFalse('''Item at index 3''' in message )

    def A__ ( self : Optional[int] ) -> int:
        '''LR-scheduler state is saved and restored alongside the other states.'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            lowercase : Any =DummyModel()
            lowercase : Union[str, Any] =torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase : int =torch.optim.lr_scheduler.StepLR(lowercase_ , step_size=1 , gamma=0.9_9 )
            lowercase : List[Any] =dummy_dataloaders()
            lowercase : List[str] =ProjectConfiguration(automatic_checkpoint_naming=lowercase_ )
            # Train baseline
            lowercase : Union[str, Any] =Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
            lowercase : Tuple =accelerator.prepare(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            # Save initial
            accelerator.save_state()
            lowercase : Dict =scheduler.state_dict()
            train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            # Training must have advanced the scheduler away from its saved state...
            self.assertNotEqual(lowercase_ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(lowercase_ , '''checkpoints''' , '''checkpoint_0''' ) )
            # ...and loading the checkpoint must restore it exactly.
            self.assertEqual(lowercase_ , scheduler.state_dict() )

    def A__ ( self : int ) -> Tuple:
        '''With ``total_limit=2`` the oldest checkpoints are pruned as new ones are saved.'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            lowercase : Dict =DummyModel()
            lowercase : Optional[int] =ProjectConfiguration(automatic_checkpoint_naming=lowercase_ , total_limit=2 )
            # Train baseline
            lowercase : Optional[Any] =Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
            lowercase : str =accelerator.prepare(lowercase_ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            # After 11 saves only the last two (checkpoint_9, checkpoint_10) survive.
            self.assertTrue(not os.path.exists(os.path.join(lowercase_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
            self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
            self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''checkpoints''' , '''checkpoint_10''' ) ) )

    @require_cuda
    def A__ ( self : Optional[Any] ) -> Optional[Any]:
        '''Re-launch this very file under ``torchrun`` to exercise the multi-GPU path below.'''
        lowercase : List[str] =["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
        execute_subprocess_async(lowercase_ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed entry point: executed under ``torchrun`` by the @require_cuda
    # test above to verify optimizer ``map_location`` handling across processes.
    SCREAMING_SNAKE_CASE = "/tmp/accelerate/state_checkpointing"
    SCREAMING_SNAKE_CASE = DummyModel()
    SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    SCREAMING_SNAKE_CASE = dummy_dataloaders()
    SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    SCREAMING_SNAKE_CASE = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        # Only the main process resets the shared checkpoint directory.
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    SCREAMING_SNAKE_CASE = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    SCREAMING_SNAKE_CASE = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        SCREAMING_SNAKE_CASE = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    SCREAMING_SNAKE_CASE = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        SCREAMING_SNAKE_CASE = group["params"][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        SCREAMING_SNAKE_CASE = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        # Main process cleans up the shared directory after all ranks are done.
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __A ):
    """
    Processor wrapping a LayoutLMv2 image processor and a LayoutXLM tokenizer
    into a single callable for visual document understanding: the image
    processor extracts pixel values (and optionally OCR words/boxes), which are
    then fed to the tokenizer together with any user-provided text.
    """

    # Sub-components serialized with this processor.
    UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
    UpperCamelCase_ = '''LayoutLMv2ImageProcessor'''
    UpperCamelCase_ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')

    def __init__( self : List[str] , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , **UpperCAmelCase : Dict ) -> Optional[int]:
        '''Store the image processor and tokenizer, validating that both are provided.'''
        # Backward compatibility: accept the deprecated `feature_extractor`
        # keyword as an alias for `image_processor`.
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , UpperCAmelCase , )
            lowercase : Any =kwargs.pop('''feature_extractor''' )
        lowercase : Dict =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(UpperCAmelCase , UpperCAmelCase )

    def __call__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Any , ) -> BatchEncoding:
        '''
        Run the image processor on the images, then the tokenizer on the
        provided or OCR-derived words and boxes, and return a single
        `BatchEncoding` that also carries the pixel values under the `image` key.
        '''
        # verify input: user-supplied boxes/labels conflict with OCR output
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
        # first, apply the image processor
        lowercase : Tuple =self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(UpperCAmelCase , UpperCAmelCase ):
                lowercase : Optional[Any] =[text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowercase : List[str] =features['''words''']
        lowercase : Optional[Any] =self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
        # add pixel values
        lowercase : List[str] =features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            # duplicate each image once per overflowed sequence it produced
            lowercase : str =self.get_overflowing_images(UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
        lowercase : Dict =images
        return encoded_inputs

    def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
        '''Repeat each image once per overflowed sequence, following the sample mapping.'''
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        lowercase : str =[]
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(UpperCAmelCase ) != len(UpperCAmelCase ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}' )
        return images_with_overflow

    def A__ ( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> int:
        '''Forward all arguments to the tokenizer's ``batch_decode``.'''
        return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )

    def A__ ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
        '''Forward all arguments to the tokenizer's ``decode``.'''
        return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )

    @property
    def A__ ( self : List[str] ) -> List[str]:
        '''Names of the inputs this processor produces for the model.'''
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def A__ ( self : int ) -> List[str]:
        '''Deprecated alias for ``image_processor_class``.'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
        return self.image_processor_class

    @property
    def A__ ( self : Dict ) -> List[str]:
        '''Deprecated alias for ``image_processor``.'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
        return self.image_processor
| 8 | 0 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( FeatureExtractionMixin ):
    """
    Feature extractor that turns HTML strings into text nodes plus their xpaths,
    using BeautifulSoup (`bs4` backend, imported here under the alias `bsa`).
    """

    def __init__(self, **kwargs):
        """Require the bs4 backend before delegating to the mixin initializer."""
        requires_backends(self, ['''bs4'''] )
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Walk up the soup tree from `element`, collecting tag names and sibling indices.

        Returns two parallel lists, root-first: tag names and 1-based subscripts
        (0 when the tag is the only sibling of its kind).
        """
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            # recursive=False: only direct children count as siblings
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (doc strings, xpath tag seqs, xpath subscript seqs)."""
        html_code = BeautifulSoup(html_string, '''html.parser''' )

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError('''Number of doc strings and xtags does not correspond''' )
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError('''Number of doc strings and xsubs does not correspond''' )

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Join tag names and subscripts into an xpath string like `/html/body/div[2]`."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f'/{tagname}'
            if subs != 0:
                xpath += f'[{subs}]'
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Extract nodes and xpaths from a single HTML string or a batch of them."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                f'but is of type {type(html_strings)}.' )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
| 708 |
'''simple docstring'''
def lowercase_ ( __A : int = 600851475143 ) -> int:
    """Return the largest prime factor of ``__A`` (Project Euler problem 3).

    Uses trial division: every factor ``i`` is divided out; whatever remains
    above 1 at the end is itself the largest prime factor.

    Raises:
        TypeError: if the argument cannot be cast to ``int``.
        ValueError: if the argument is not >= 1.
    """
    try:
        n = int(__A)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    max_prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_prime = i
            n //= i
        i += 1
    if n > 1:
        # the remaining cofactor is prime and larger than any factor found so far
        max_prime = n
    return int(max_prime)
if __name__ == "__main__":
    # The solver in this module is named `lowercase_`; `solution` does not exist here.
    print(f"""{lowercase_() = }""")
| 8 | 0 |
'''simple docstring'''
def lowercase_ ( bin_string : str ) -> str:
    """Convert a binary string to its octal representation.

    The input is left-padded with zeros to a multiple of three bits, then each
    3-bit group is converted to one octal digit.

    Raises:
        ValueError: if the string contains non-binary characters or is empty.
    """
    if not all(char in '''01''' for char in bin_string ):
        raise ValueError('''Non-binary value was passed to the function''' )
    if not bin_string:
        raise ValueError('''Empty string was passed to the function''' )
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = '''0''' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            # weight bits 4, 2, 1 within each group
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase_ ( u : float , p : int ) -> float:
    """Return ``u * (u - 1) * ... * (u - p + 1)``.

    This is the coefficient term of Newton's forward interpolation formula.
    The original signature declared ``__A`` twice (a SyntaxError) while the
    body read ``u``; parameters are restored to meaningful names.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def lowercase_ ( ) -> None:
    """Interactively read sample points and evaluate Newton's forward interpolation."""
    n = int(input('''enter the numbers of values: ''' ) )
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)

    def _ucal(u: float, p: int) -> float:
        # local helper: u * (u - 1) * ... * (u - p + 1)
        temp = u
        for k in range(1, p):
            temp = temp * (u - k)
        return temp

    print('''enter the values of parameters in a list: ''' )
    x = list(map(int, input().split()))
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (_ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
    # The interactive driver (the last `lowercase_` defined above) is the entry point;
    # `main` does not exist in this module.
    lowercase_()
| 8 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class UpperCAmelCase_ ( PretrainedConfig ):
    """Composite configuration for a generic encoder-decoder model.

    Wraps one encoder config and one decoder config, each instantiated via
    `AutoConfig.for_model` from the nested dicts passed at construction time.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )

        # imported lazily to avoid a circular import with the auto-config module
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Build a composite config from two existing configs, marking the decoder as such."""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for `AutoImageProcessor` loading/registration from the Hub and local files.

    NOTE(review): the previous version named every method ``A__`` (so unittest
    discovered no tests) and referenced undefined placeholder names; real test
    names and targets are restored below.
    """

    def setUp(self):
        # Fail fast instead of waiting for the interactive remote-code prompt.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(processor_tmpfile, '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile, '''w''' ) )

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(processor_tmpfile, '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile, '''w''' ) )

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(processor_tmpfile, '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile, '''w''' ) )

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop('''image_processor_type''' )
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('''_processor_class''' not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,
                open(processor_tmpfile, '''w''' ) , )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, '''clip-base is not a local folder and is not a valid model identifier''' ):
            _ = AutoImageProcessor.from_pretrained('''clip-base''' )

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='''aaaaaa''' )

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )

        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('''custom''' , CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} ,
                    open(processor_tmpfile, '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(config_tmpfile, '''w''' ) )

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register('''custom''' , CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(image_processor, '''is_local''' ) )

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 0 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution shifted by `loc` and scaled by `scale` on top of `base_distribution`.

    Restored real class/base names: every class in this module was mangled to the
    same identifier (each shadowing the previous), while `AffineTransformed` is
    already referenced by this real name further down the file.
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affine-transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affine-transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation, derived from the variance."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Projects hidden states into one tensor per distribution argument, then applies `domain_map`.

    Restored the real class name (referenced by this name further down the file)
    and the `forward` method name required by `nn.Module.__call__`.
    """

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # one linear head per distribution argument, sized by args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable so it can participate in an `nn.Module` graph.

    Restored the real class name (referenced by this name further down the file)
    and the `forward` method name required by `nn.Module.__call__`.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class that converts projected network outputs into a torch `Distribution`.

    Subclasses set `distribution_class` and `args_dim` and implement `domain_map`.
    Restored the real class name and method names: the mangled version gave every
    method the same name (shadowing all but the last) and referenced
    `AffineTransformed` / `ParameterProjection` / `LambdaLayer` by their real names.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # scale each argument's size by the output dimensionality
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build the (optionally affine-transformed) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event sampled from the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions (length of `event_shape`)."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support (used for padding)."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping hidden states to distribution arguments."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        """Map unconstrained projections into each argument's valid domain (subclass hook)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positivity mapping: (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output (df, loc, scale)."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        """Constrain raw projections: scale > 0 (eps-clamped) and df > 2."""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal (Gaussian) distribution output (loc, scale)."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        """Constrain the raw scale projection to be strictly positive."""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial distribution output (total_count, logits)."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        """Constrain the raw count projection to be positive; logits stay unconstrained."""
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Fold an optional scale into the logits instead of affine-transforming the samples."""
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 711 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2 (Coulomb's constant k_e)


def lowercase_ ( force : float , charge1 : float , charge2 : float , distance : float ) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four quantities is passed as 0.

    Exactly one of force, charge1, charge2, distance must be 0; that quantity is
    computed from the other three and returned as a single-entry dict.

    Raises:
        ValueError: if not exactly one argument is 0, or distance is negative.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if distance < 0:
        raise ValueError('''Distance cannot be negative''' )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    """Builds a small `ASTConfig` plus matching dummy inputs for the tests below.

    Restored the real class name (it is instantiated as `ASTModelTester(self)`
    further down the file) and real parameter/method names — the mangled version
    declared every parameter with the same identifier, which is a SyntaxError.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Create a config plus random spectrogram inputs (and labels when enabled)."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        """Forward a random batch and check the hidden-state shape."""
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for AST.

    Restored the real base classes, attribute names, and `test_*` method names —
    the mangled version named every method identically, so unittest discovered
    no tests at all.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowercase_ ( ) -> Union[str, Any]:
    """Download a sample audio clip from the Hub and return ``(waveform, sampling_rate)``.

    Fixes the original, which passed the undefined name ``__UpperCamelCase`` to
    ``torchaudio.load`` and returned the undefined names ``audio`` and
    ``sampling_rate`` after unpacking into a throwaway local.
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration test: run the fine-tuned AudioSet AST checkpoint end-to-end."""
    @cached_property
    def A__ ( self : str ):
        """Feature extractor for the checkpoint, or None when torchaudio is unavailable."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )
    @slow
    def A__ ( self : int ):
        """End-to-end inference check: features -> model -> expected logits slice."""
        # NOTE(review): every assignment below lands in a throwaway `lowercase` local
        # while later lines read `model`, `outputs`, `audio`, `feature_extractor` and
        # the undefined `_SCREAMING_SNAKE_CASE` placeholder; upstream binds these to
        # named variables (and the cached property above) — confirm before relying on it.
        lowercase : Optional[int] =self.default_feature_extractor
        lowercase : Optional[Any] =ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(_SCREAMING_SNAKE_CASE )
        lowercase : Any =self.default_feature_extractor
        lowercase , lowercase : List[Any] =prepare_audio()
        lowercase : int =audio.squeeze().numpy()
        lowercase : Optional[Any] =feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            lowercase : Optional[int] =model(**_SCREAMING_SNAKE_CASE )
        # verify the logits
        lowercase : List[Any] =torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        lowercase : int =torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module logger.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# NOTE(review): the same name is rebound for every constant below (vocab file
# names, pretrained vocab map, positional-embedding sizes); upstream these are
# distinct module constants, so later code can only see the last binding — confirm.
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}
# Maximum model input length for the known checkpoint.
SCREAMING_SNAKE_CASE = {
    'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_ ( ) -> Any:
"""simple docstring"""
lowercase : int =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowercase : Union[str, Any] =bs[:]
lowercase : Tuple =0
for b in range(2**8 ):
if b not in bs:
bs.append(__A )
cs.append(2**8 + n )
n += 1
lowercase : Optional[Any] =[chr(__A ) for n in cs]
return dict(zip(__A , __A ) )
def lowercase_ ( __A : str ) -> set:
    """Return the set of adjacent symbol pairs in the word ``__A``.

    A word is any sequence of symbols (e.g. a tuple of BPE sub-tokens); each
    consecutive pair ``(word[i], word[i + 1])`` appears once in the result.

    Fixes the original, which read the undefined names ``word``, ``pairs`` and
    ``prev_char`` after binding everything to a throwaway local.
    """
    pairs = set()
    prev_char = __A[0]
    for char in __A[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase_ ( __A ):
    """Byte-level BPE tokenizer for LED (mirrors the BART/GPT-2 tokenizer),
    plus LED-specific padding of ``global_attention_mask`` in ``_pad``."""
    # NOTE(review): several signatures below repeat the parameter name
    # `UpperCAmelCase`, and some bodies read names (`bpe_merges`, `index`, `kv`,
    # ...) that were bound to throwaway `lowercase` locals — upstream uses
    # distinct names; confirm before relying on this block.
    UpperCamelCase_ = VOCAB_FILES_NAMES
    UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
        """Load the vocabulary and BPE merge ranks and configure special tokens."""
        lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
        lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
        lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
        lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
        lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
        lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
        super().__init__(
            errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
        with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
            lowercase : str =json.load(UpperCAmelCase )
        lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
        lowercase : Optional[int] =errors  # how to handle errors in decoding
        lowercase : Tuple =bytes_to_unicode()
        lowercase : int ={v: k for k, v in self.byte_encoder.items()}
        with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
            lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
        lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
        lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
        lowercase : Optional[int] ={}
        lowercase : Any =add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def A__ ( self : Union[str, Any] ) -> Optional[int]:
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder )
    def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
        """Return the full vocabulary (base plus added tokens) as a dict."""
        return dict(self.encoder , **self.added_tokens_encoder )
    def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
        """Apply cached byte-pair-encoding merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        lowercase : List[str] =tuple(UpperCAmelCase )
        lowercase : List[str] =get_pairs(UpperCAmelCase )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowercase , lowercase : Optional[int] =bigram
            lowercase : Union[str, Any] =[]
            lowercase : Optional[Any] =0
            while i < len(UpperCAmelCase ):
                try:
                    lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowercase : Optional[int] =j
                if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowercase : List[str] =tuple(UpperCAmelCase )
            lowercase : str =new_word
            if len(UpperCAmelCase ) == 1:
                break
            else:
                lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
        lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
        lowercase : Union[str, Any] =word
        return word
    def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        """Pre-tokenize with the GPT-2 regex, byte-encode, then BPE each piece."""
        lowercase : Dict =[]
        for token in re.findall(self.pat , UpperCAmelCase ):
            lowercase : Optional[int] =''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
        return bpe_tokens
    def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
        """Convert a token string to its vocabulary id (falling back to unk)."""
        return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
    def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
        """Convert a vocabulary id back to its token string."""
        return self.decoder.get(UpperCAmelCase )
    def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
        """Join tokens and decode the byte-level mapping back to text."""
        lowercase : str =''''''.join(UpperCAmelCase )
        lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        """Write ``vocab.json`` and ``merges.txt`` into the save directory."""
        if not os.path.isdir(UpperCAmelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowercase : Optional[Any] =os.path.join(
            UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowercase : List[Any] =os.path.join(
            UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
        lowercase : List[str] =0
        with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            # NOTE(review): the lambda parameter below is unused and `kv` is
            # undefined here; upstream sorts with `lambda kv: kv[1]` — confirm.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowercase : Any =token_index
                writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        """Build ``<s> A </s>`` (one sequence) or ``<s> A </s></s> B </s>`` (a pair)."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase : Optional[int] =[self.cls_token_id]
        lowercase : List[Any] =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCAmelCase )) + [1]
        return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
    def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        """Return all-zero token type ids (LED does not use token types)."""
        lowercase : Dict =[self.sep_token_id]
        lowercase : Optional[int] =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
        """Optionally prepend a space so the first word is BPE'd like mid-text words."""
        lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
            lowercase : Union[str, Any] =''' ''' + text
        return (text, kwargs)
    def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
        """Pad via the base class, then pad ``global_attention_mask`` with ``-1``."""
        lowercase : Optional[int] =super()._pad(
            encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
        # Load from model defaults
        if return_attention_mask is None:
            lowercase : Tuple ='''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
            if needs_to_be_padded:
                lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase : List[str] =(
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase : Any =[-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
| 8 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( UpperCAmelCase__ ):
    """Output of an SDE-VE scheduler step: the previous sample and its mean."""
    # NOTE(review): upstream these are two annotated tensor fields
    # (prev_sample, prev_sample_mean); here both bind the same class attribute
    # name, so the second assignment shadows the first — confirm intent.
    UpperCamelCase_ = 42
    UpperCamelCase_ = 42
class UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    """Variance-exploding (VE) SDE scheduler (score-based generative modelling)
    with predictor (`step_pred`) and corrector (`step_correct`) updates."""
    # NOTE(review): throughout this class, results land in throwaway `lowercase`
    # locals while other methods read `self.timesteps` / `self.discrete_sigmas`
    # (e.g. `set_timesteps` computes a linspace that is never stored on `self`);
    # upstream assigns these attributes — confirm before relying on this block.
    UpperCamelCase_ = 1
    @register_to_config
    def __init__( self : Tuple , UpperCAmelCase : int = 2000 , UpperCAmelCase : float = 0.1_5 , UpperCAmelCase : float = 0.0_1 , UpperCAmelCase : float = 1348.0 , UpperCAmelCase : float = 1e-5 , UpperCAmelCase : int = 1 , ) -> Optional[Any]:
        """Store sigma_max and initialize the sigma schedule."""
        lowercase : int =sigma_max
        # setable values
        lowercase : Any =None
        self.set_sigmas(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Any , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
        """No input scaling is needed for this scheduler; return the sample unchanged."""
        return sample
    def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : float = None , UpperCAmelCase : Union[str, torch.device] = None ) -> str:
        """Compute the continuous timestep grid from 1 down to sampling_eps."""
        lowercase : Dict =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        lowercase : int =torch.linspace(1 , UpperCAmelCase , UpperCAmelCase , device=UpperCAmelCase )
    def A__ ( self : Any , UpperCAmelCase : int , UpperCAmelCase : float = None , UpperCAmelCase : float = None , UpperCAmelCase : float = None ) -> Optional[int]:
        """Build the geometric sigma schedule (and timesteps if missing)."""
        lowercase : Optional[int] =sigma_min if sigma_min is not None else self.config.sigma_min
        lowercase : Any =sigma_max if sigma_max is not None else self.config.sigma_max
        lowercase : Any =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(UpperCAmelCase , UpperCAmelCase )
        lowercase : Optional[int] =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        lowercase : Any =torch.exp(torch.linspace(math.log(UpperCAmelCase ) , math.log(UpperCAmelCase ) , UpperCAmelCase ) )
        lowercase : List[str] =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def A__ ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : str ) -> Tuple:
        """Return the sigma of the previous timestep (zero at the first step)."""
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def A__ ( self : List[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : bool = True , ) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: one reverse-SDE update from the model output."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        lowercase : List[Any] =timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        lowercase : Union[str, Any] =(timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowercase : Optional[int] =timesteps.to(self.discrete_sigmas.device )
        lowercase : Optional[int] =self.discrete_sigmas[timesteps].to(sample.device )
        lowercase : Any =self.get_adjacent_sigma(UpperCAmelCase , UpperCAmelCase ).to(sample.device )
        lowercase : str =torch.zeros_like(UpperCAmelCase )
        lowercase : Dict =(sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowercase : List[str] =diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            lowercase : Optional[Any] =diffusion.unsqueeze(-1 )
        lowercase : Tuple =drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        lowercase : List[Any] =randn_tensor(
            sample.shape , layout=sample.layout , generator=UpperCAmelCase , device=sample.device , dtype=sample.dtype )
        lowercase : str =sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowercase : Dict =prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=UpperCAmelCase , prev_sample_mean=UpperCAmelCase )
    def A__ ( self : Dict , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: Langevin-style correction using the SNR-derived step size."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowercase : int =randn_tensor(sample.shape , layout=sample.layout , generator=UpperCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        lowercase : str =torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        lowercase : Optional[int] =torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        lowercase : Optional[int] =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowercase : Optional[int] =step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        lowercase : List[Any] =step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            lowercase : Optional[Any] =step_size.unsqueeze(-1 )
        lowercase : List[str] =sample + step_size * model_output
        lowercase : Any =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=UpperCAmelCase )
    def A__ ( self : List[str] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , ) -> torch.FloatTensor:
        """Add sigma-scaled noise to clean samples for the given timesteps."""
        lowercase : Union[str, Any] =timesteps.to(original_samples.device )
        lowercase : Tuple =self.discrete_sigmas.to(original_samples.device )[timesteps]
        lowercase : str =(
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(UpperCAmelCase ) * sigmas[:, None, None, None]
        )
        lowercase : Any =noise + original_samples
        return noisy_samples
    def __len__( self : List[Any] ) -> Dict:
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 713 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
    """Multiply two polynomials with the fast Fourier transform: pad both to a
    power-of-two length, DFT each, multiply pointwise, then inverse-DFT."""
    # NOTE(review): throughout this class, results land in throwaway `lowercase`
    # locals while later lines read `self.polyA`, `self.polyB`, `self.root`,
    # `dft`, `next_ncol`, `current_root`, ... — upstream binds those names
    # directly; confirm before relying on this block.
    def __init__( self : List[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
        """Normalize both coefficient lists, pad to a power of two, and compute the product."""
        lowercase : Any =list(poly_a or [0] )[:]
        lowercase : Dict =list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        lowercase : int =len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        lowercase : List[str] =len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        lowercase : Tuple =int(
            2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        lowercase : Optional[int] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        lowercase : str =self.__multiply()
    def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
        """Iterative (butterfly) discrete Fourier transform of polynomial 'A' or 'B'."""
        lowercase : Tuple =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case
        if len(UpperCAmelCase ) <= 1:
            return dft[0]
        #
        lowercase : List[Any] =self.c_max_length // 2
        while next_ncol > 0:
            lowercase : str =[[] for i in range(UpperCAmelCase )]
            lowercase : List[str] =self.root**next_ncol
            # First half of next step
            lowercase : Union[str, Any] =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(UpperCAmelCase ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            lowercase : Any =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(UpperCAmelCase ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            lowercase : Tuple =new_dft
            lowercase : List[Any] =next_ncol // 2
        return dft[0]
    def A__ ( self : int ) -> str:
        """Pointwise-multiply the two DFTs and inverse-transform to get the product coefficients."""
        lowercase : List[Any] =self.__dft('''A''' )
        lowercase : Union[str, Any] =self.__dft('''B''' )
        lowercase : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        lowercase : Tuple =2
        while next_ncol <= self.c_max_length:
            lowercase : Tuple =[[] for i in range(UpperCAmelCase )]
            lowercase : Tuple =self.root ** (next_ncol // 2)
            lowercase : Optional[int] =1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            lowercase : List[Any] =new_inverse_c
            next_ncol *= 2
        # Unpack
        lowercase : List[str] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__( self : Dict ) -> Optional[Any]:
        """Human-readable rendering of A, B and their product A*B."""
        lowercase : Optional[Any] ='''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) )
        lowercase : List[str] ='''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) )
        lowercase : Optional[Any] ='''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for coef, i in enumerate(self.product ) )
        return f'{a}\n{b}\n{c}'
# Unit tests: run the embedded doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Minimal stand-in so module import succeeds without the vision extras.
    class UpperCAmelCase_ :
        """Placeholder for PIL.Image when vision dependencies are unavailable."""
        @staticmethod
        def A__ ( *UpperCAmelCase : List[str] , **UpperCAmelCase : int ) -> List[Any]:
            """No-op stand-in for ``Image.open``."""
            pass
@is_pipeline_test
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for zero-shot image classification (tiny and real CLIP checkpoints)."""
    # NOTE(review): the pipeline results below are bound to throwaway `lowercase`
    # locals while the assertions read `__lowercase`, which is undefined (and
    # name-mangled inside the class); upstream passes the named results — confirm.
    @require_torch
    def A__ ( self : List[str] ) -> Tuple:
        """Tiny random CLIP checkpoint, PyTorch backend: scores are ~uniform over labels."""
        lowercase : Any =pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        lowercase : int =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowercase : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(__lowercase ) , [
                [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
                [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
            ] , )
        lowercase : Dict =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowercase ) , [
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
            ] , )
    @require_tf
    def A__ ( self : Any ) -> Union[str, Any]:
        """Tiny random CLIP checkpoint, TensorFlow backend: same ~uniform-score checks."""
        lowercase : Optional[int] =pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        lowercase : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowercase : List[Any] =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(__lowercase ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
        lowercase : str =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowercase ) , [
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
                [
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                    {'''score''': 0.3_3_3, '''label''': ANY(__lowercase )},
                ],
            ] , )
    @slow
    @require_torch
    def A__ ( self : int ) -> List[Any]:
        """Real CLIP checkpoint, PyTorch backend: verify exact label ranking/scores."""
        lowercase : Union[str, Any] =pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        lowercase : List[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowercase : Union[str, Any] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__lowercase ) , [
                {'''score''': 0.5_1_1, '''label''': '''remote'''},
                {'''score''': 0.4_8_5, '''label''': '''cat'''},
                {'''score''': 0.0_0_4, '''label''': '''plane'''},
            ] , )
        lowercase : Optional[int] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowercase ) , [
                [
                    {'''score''': 0.5_1_1, '''label''': '''remote'''},
                    {'''score''': 0.4_8_5, '''label''': '''cat'''},
                    {'''score''': 0.0_0_4, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
    @slow
    @require_tf
    def A__ ( self : List[Any] ) -> List[str]:
        """Real CLIP checkpoint, TensorFlow backend: verify exact label ranking/scores."""
        lowercase : str =pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowercase : str =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__lowercase ) , [
                {'''score''': 0.5_1_1, '''label''': '''remote'''},
                {'''score''': 0.4_8_5, '''label''': '''cat'''},
                {'''score''': 0.0_0_4, '''label''': '''plane'''},
            ] , )
        lowercase : Any =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowercase ) , [
                [
                    {'''score''': 0.5_1_1, '''label''': '''remote'''},
                    {'''score''': 0.4_8_5, '''label''': '''cat'''},
                    {'''score''': 0.0_0_4, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
| 714 |
'''simple docstring'''
def lowercase_ ( number : int , shift_amount : int ) -> str:
    """Logically left-shift ``number`` by ``shift_amount`` bits.

    Returns the binary string including its ``0b`` prefix, e.g.
    ``lowercase_(0b1010, 3) == '0b1010000'``.

    Raises:
        ValueError: if either input is negative.

    Fixes the original signature, which declared both parameters with the same
    name ``__A`` (a SyntaxError) and then read undefined names in the body.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def lowercase_ ( number : int , shift_amount : int ) -> str:
    """Logically right-shift ``number`` by ``shift_amount`` bits.

    Returns the binary string with its ``0b`` prefix; shifting out every bit
    yields ``'0b0'``.

    Raises:
        ValueError: if either input is negative.

    Fixes the original signature, which declared both parameters with the same
    name ``__A`` (a SyntaxError) and then read undefined names in the body.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def lowercase_ ( number : int , shift_amount : int ) -> str:
    """Arithmetically right-shift ``number`` by ``shift_amount`` bits.

    Works on a two's-complement binary string with an explicit sign bit and
    replicates that sign bit from the left, e.g.
    ``lowercase_(-17, 2) == '0b111011'``.

    Fixes the original signature, which declared both parameters with the same
    name ``__A`` (a SyntaxError) and then read undefined names in the body.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
SCREAMING_SNAKE_CASE = 8
def lowercase_ ( x , bits : int = 8 ) -> "torch.Tensor":
    """Convert an image tensor with values in ``[0, 1]`` to a sign-encoded
    bit representation.

    Args:
        x: Tensor of shape ``(batch, channels, height, width)``; values are
            quantized to integer pixel values in ``[0, 255]``.
        bits: Bits per channel (default 8, one byte per channel).

    Returns:
        Float tensor of shape ``(batch, channels * bits, height, width)``
        with ``-1.0`` for a zero bit and ``1.0`` for a one bit.
    """
    device = x.device
    # Quantize to integer pixel values in [0, 255].
    x = (x * 2_5_5).int().clamp(0 , 2_5_5 )
    # Bit masks from most-significant to least-significant bit.
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = mask.view(-1 , 1 , 1 )          # 'd -> d 1 1'
    x = x.unsqueeze(2 )                    # 'b c h w -> b c 1 h w'
    # Broadcast AND against every mask: (b, c, bits, h, w) of {0, 1}.
    bits_tensor = ((x & mask) != 0).float()
    b, c = bits_tensor.shape[0], bits_tensor.shape[1]
    # 'b c d h w -> b (c d) h w'
    bits_tensor = bits_tensor.reshape(b , c * bits , *bits_tensor.shape[-2:] )
    # Map {0, 1} -> {-1, 1}.
    bits_tensor = bits_tensor * 2 - 1
    return bits_tensor
def lowercase_ ( x , bits : int = 8 ) -> "torch.Tensor":
    """Invert the bit encoding: fold a sign-encoded bit tensor back into
    float pixel values in ``[0, 1]``.

    Args:
        x: Tensor of shape ``(batch, channels * bits, height, width)``;
            entries ``> 0`` count as a one bit.
        bits: Bits per channel (default 8). The original hard-coded ``d=8``
            in the reshape even though ``bits`` was a parameter; the
            parameter is honored here.

    Returns:
        Float tensor of shape ``(batch, channels, height, width)`` clamped
        to ``[0, 1]``.
    """
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = mask.view(-1 , 1 , 1 )          # 'd -> d 1 1'
    # 'b (c d) h w -> b c d h w' with d = bits
    x = x.reshape(x.shape[0] , -1 , bits , *x.shape[-2:] )
    # Weighted sum over the bit dimension reconstructs the byte value.
    dec = (x * mask).sum(dim=2 )
    return (dec / 2_5_5).clamp(0.0 , 1.0 )
def lowercase_ ( self , model_output , timestep : int , sample , eta : float = 0.0 , use_clipped_model_output : bool = True , generator=None , return_dict : bool = True , ) -> "Union['DDIMSchedulerOutput', Tuple]":
    """DDIM scheduler step that clips the predicted ``x_0`` to the bit
    scale instead of ``[-1, 1]``. Monkey-patched onto a scheduler, so
    ``self`` is the scheduler instance.

    Args:
        model_output: Predicted noise ``e_theta(x_t, t)``.
        timestep: Current discrete timestep ``t``.
        sample: Current noisy sample ``x_t``.
        eta: Weight of the stochastic noise term (sigma_t scaling).
        use_clipped_model_output: Re-derive the noise from the clipped x_0.
        generator: Optional RNG for the added noise.
        return_dict: When False, return a one-element tuple instead of a
            ``DDIMSchedulerOutput``.

    Raises:
        ValueError: If ``set_timesteps`` has not been called yet.
    """
    # Restored parameter names: the obfuscated signature repeated ``__A``.
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample ("predicted x_0", formula (12))
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0" to the bit scale rather than [-1, 1].
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance sigma_t(eta) -> formula (16)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12)
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t-1 without "random noise" of formula (12)
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else "cpu"
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def lowercase_ ( self , model_output , timestep : int , sample , prediction_type : str = "epsilon" , generator=None , return_dict : bool = True , ) -> "Union['DDPMSchedulerOutput', Tuple]":
    """DDPM scheduler step that clips the predicted ``x_0`` to the bit
    scale instead of ``[-1, 1]``. Monkey-patched onto a scheduler, so
    ``self`` is the scheduler instance.

    Args:
        model_output: Model prediction (noise or sample, per
            ``prediction_type``).
        timestep: Current discrete timestep ``t``.
        sample: Current noisy sample ``x_t``.
        prediction_type: ``"epsilon"`` (predict noise) or ``"sample"``.
        generator: Optional RNG for the added noise.
        return_dict: When False, return a one-element tuple instead of a
            ``DDPMSchedulerOutput``.

    Raises:
        ValueError: For an unsupported ``prediction_type``.
    """
    # Restored parameter names: the obfuscated signature repeated ``__A``.
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample ("predicted x_0", formula (15)
    # of https://arxiv.org/pdf/2006.11239.pdf)
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
    # 3. Clip "predicted x_0" to the bit scale rather than [-1, 1].
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current
    # sample x_t (formula (7) of https://arxiv.org/pdf/2006.11239.pdf)
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample mu_t (formula (7))
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise (only for t > 0)
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class UpperCAmelCase_ ( DiffusionPipeline ):
    """Bit Diffusion pipeline: runs the diffusion process in a binary (bit)
    latent space and decodes the final bits back to pixel values.
    """

    def __init__( self , unet , scheduler , bit_scale : float = 1.0 , ) -> None:
        """Store the bit scale, patch the scheduler's ``step`` with the
        bit-space variant, and register the modules.

        Args:
            unet: Conditioned UNet producing the noise prediction.
            scheduler: A ``DDIMScheduler`` or ``DDPMScheduler`` instance.
            bit_scale: Scale applied to the bit-encoded latents.
        """
        super().__init__()
        self.bit_scale = bit_scale
        # Replace the scheduler's step with the matching bit-space step
        # function defined above (patched on the instance, not the class).
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , height : int = 256 , width : int = 256 , num_inference_steps : int = 50 , generator=None , batch_size : int = 1 , output_type : str = "pil" , return_dict : bool = True , **kwargs , ) -> "Union[Tuple, ImagePipelineOutput]":
        """Sample ``batch_size`` images of ``height`` x ``width`` pixels.

        Returns a PIL image list when ``output_type == "pil"``; wrapped in
        an ``ImagePipelineOutput`` unless ``return_dict`` is False.
        """
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        # Encode the Gaussian latents into scaled bit space.
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            model_output = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(model_output , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 715 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    # Stub that stands in for the image loader when vision dependencies are
    # unavailable, so module-level references still resolve at import time.
    class UpperCAmelCase_ :
        """No-op placeholder used when ``is_vision_available()`` is False."""

        @staticmethod
        def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
            '''Accept any arguments and do nothing.'''
            pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
    """Stub image loader used by these tests: accepts any argument and
    deliberately yields ``None`` (real loading is mocked out)."""
    result = None
    return result
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for ``document-question-answering`` across several
    model families (tiny LayoutLMv2, LayoutLMv2-docvqa, LayoutLM-document-qa,
    Donut-docvqa).

    NOTE(review): several ``def`` signatures below reuse the parameter name
    ``UpperCAmelCase`` within one definition, which is not valid Python; the
    original parameter names need restoring before this module can import.
    """

    # Mapping of model architectures exercised by the generic pipeline test.
    UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
        '''Build a DQA pipeline plus three example payloads: a freshly
        loaded image, a URL-referenced image, and an image with explicit
        OCR word boxes.'''
        lowercase : Dict =pipeline(
            '''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : Optional[Any] =INVOICE_URL
        lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        lowercase : Dict ='''What is the placebo?'''
        lowercase : Optional[Any] =[
            {
                '''image''': load_image(UpperCAmelCase ),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
        '''Run the pipeline on the prepared examples and check only the
        shape of the output (scores/answers/spans of any value).'''
        lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
        self.assertEqual(
            UpperCAmelCase , [
                [
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                ]
            ]
            * 3 , )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        '''Smoke-test a tiny random LayoutLMv2 checkpoint, including the
        empty-OCR and explicit words/boxes code paths.'''
        lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
        lowercase : Union[str, Any] =INVOICE_URL
        lowercase : Tuple ='''How many cats are there?'''
        lowercase : Optional[int] =[
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )
        # We can optionnally pass directly the words and bounding boxes
        lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Dict =[]
        lowercase : str =[]
        lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        '''Check exact top-2 answers from the fine-tuned LayoutLMv2 docvqa
        checkpoint for single, dict and batched inputs.'''
        lowercase : Union[str, Any] =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
        lowercase : Dict =INVOICE_URL
        lowercase : str ='''What is the invoice number?'''
        lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2 , )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : Dict ) -> List[str]:
        '''Same LayoutLMv2 checkpoint but with a short max_seq_len=50,
        which changes the top answers.'''
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
        lowercase : Dict =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Any =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : str ) -> Dict:
        '''LayoutLM (v1) document-qa checkpoint; also exercises the
        image=None + word_boxes input path.'''
        lowercase : Any =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : Tuple =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
        lowercase : Tuple =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : str =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2 , )
        lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : Dict ) -> Any:
        '''Same LayoutLM checkpoint with max_seq_len=50; expectations
        shift to near-certain top answers.'''
        lowercase : Dict =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : List[Any] =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
        lowercase : str =INVOICE_URL
        lowercase : int ='''What is the invoice number?'''
        lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Union[str, Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
        lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

    @slow
    @require_torch
    def A__ ( self : List[Any] ) -> Optional[int]:
        '''Donut (OCR-free, generative) docvqa checkpoint returns only an
        answer string.'''
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
        lowercase : Any =INVOICE_URL
        lowercase : Union[str, Any] ='''What is the invoice number?'''
        lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )

    @require_tf
    @unittest.skip('''Document question answering not implemented in TF''' )
    def A__ ( self : Any ) -> Any:
        '''Placeholder for the (unimplemented) TensorFlow variant.'''
        pass
| 8 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffold for the SqueezeBERT package: the structure dict maps
# submodule name -> public symbols, extended conditionally per available
# backend, then handed to _LazyModule so heavy imports happen on first use.
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

# Fast (tokenizers-backed) tokenizer is optional.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

# PyTorch modeling code is optional.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module in place of this one so attribute access
    # triggers the deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
'''simple docstring'''
def lowercase_ ( number : float , digit_amount : int ) -> float:
    """Isolate the fractional part of ``number``.

    Args:
        number: Value whose decimal part is extracted (sign preserved).
        digit_amount: When positive, round the fraction to this many
            decimal places; when zero or negative, return it unrounded.

    Returns:
        The fractional part, e.g. ``lowercase_(35.345, 1)`` -> ``0.3``.
    """
    # Restored parameter names: the obfuscated signature repeated ``__A``.
    # ``int()`` truncates toward zero, so the sign of the fraction matches
    # the sign of ``number``.
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
    # ``decimal_isolate`` is the conventional public name for the helper
    # defined above; the demo below referenced it without a binding, which
    # raised NameError. Bind it explicitly so the calls resolve.
    decimal_isolate = lowercase_
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 8 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration for Funnel Transformer models (``model_type="funnel"``).

    Stores the encoder block layout (``block_sizes``/``block_repeats``),
    attention geometry, activation and dropout/initializer hyper-parameters;
    remaining keyword arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "funnel"
    # Map common config attribute names onto Funnel's own names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=3_0522,
        block_sizes=[4, 4, 4],  # NOTE(review): mutable default kept for API compatibility; treated as read-only
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        # One repeat per block unless explicitly overridden.
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        """Total number of encoder layers (sum over all blocks)."""
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")

    @property
    def num_blocks(self):
        """Number of encoder blocks."""
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( box , width : int , height : int ):
    """Normalize a pixel-space bounding box to the 0-1000 grid LayoutLM
    models expect.

    Args:
        box: ``[left, top, right, bottom]`` in pixels.
        width: Image width in pixels.
        height: Image height in pixels.

    Returns:
        ``[left, top, right, bottom]`` as integers scaled to 0-1000.
    """
    # Restored parameter names: the obfuscated signature repeated ``__A``.
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]
def lowercase_ ( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] ):
    """Run Tesseract OCR on ``image`` and return the recognized words with
    their bounding boxes normalized to the 0-1000 LayoutLM grid.

    Args:
        image: Image array (converted to PIL internally).
        lang: Tesseract language code, or None for the default.
        tesseract_config: Extra flags forwarded to Tesseract.

    Returns:
        ``(words, normalized_boxes)`` with one box per kept word.
    """
    # Restored parameter names: the obfuscated signature repeated ``__A``.
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    # (a set gives O(1) membership tests instead of the original list's O(n))
    irrelevant_indices = {idx for idx, word in enumerate(words ) if not word.strip()}
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
lowercase : Optional[Any] =do_resize
lowercase : List[Any] =size
lowercase : List[str] =resample
lowercase : Dict =do_rescale
lowercase : str =rescale_value
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase : List[Any] =apply_ocr
lowercase : Union[str, Any] =ocr_lang
lowercase : str =tesseract_config
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase : Optional[Any] =(size['''height'''], size['''width'''])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowercase : Tuple =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
lowercase : List[str] =resample if resample is not None else self.resample
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : str =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : int =[]
lowercase : Tuple =[]
for image in images:
lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
if apply_ocr:
lowercase : int =words_batch
lowercase : List[str] =boxes_batch
return data
| 8 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> public names it exports.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register (not overwrite) the torch-only exports.
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    """Builds small DistilBert configs/inputs and checks each TF head's output shapes."""

    def __init__(self, parent):
        # `parent` is the unittest.TestCase driving the checks (provides assertEqual).
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a tiny DistilBertConfig plus random ids, mask and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model must accept both dict and list inputs and return hidden states."""
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """MLM head returns per-token vocabulary logits."""
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head returns start/end logits over the sequence."""
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head returns one logit vector per example."""
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Multiple-choice head: inputs are tiled per choice, logits are per-choice."""
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head returns per-token label logits."""
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Shape the inputs the way TFModelTesterMixin expects (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for the TF DistilBert family."""

    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # DistilBert has no head-masking support and no ONNX test here.
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Only check the first checkpoint to keep the slow test short.
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    """Numerical regression test against the released distilbert-base-uncased weights."""

    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference values recorded from the released checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 8 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


# NOTE: these two wrappers intentionally shadow the builtins `map`/`filter`
# so the benchmark keys read naturally; this file is a standalone script.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    """Time a `Dataset.map` call with the given kwargs."""
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    """Time a `Dataset.filter` call with the given kwargs."""
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    """Benchmark map/filter across formats and write timings to RESULTS_FILE_PATH."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> public names it exports.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds BridgeTower image-processor hyper-parameters and expected-shape helpers."""

    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        # Default matches BridgeTower's 288-pixel shortest edge.
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """Kwargs to instantiate a BridgeTowerImageProcessor matching this tester."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the (height, width) the processor is expected to produce.

        Unbatched: resize the first image so its shorter side hits the target,
        cap the longer side at 1333/800 * size, then round both dimensions down
        to a multiple of `size_divisor`. Batched: take the per-axis maxima.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # Non-PIL inputs are channel-first arrays/tensors: (C, H, W).
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests BridgeTowerImageProcessor on PIL, NumPy and PyTorch inputs."""

    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        # Covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 720 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
# Parsed version of the installed torch package, computed once at import time.
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compare a library's installed version (or an already-parsed Version) against a requirement.

    Args:
        library_or_version: A package name whose installed version is looked up, or a Version.
        operation: A comparison key present in STR_OPERATION_TO_FUNC (e.g. ">=", "==", "<").
        requirement_version: Version string to compare against.

    Raises:
        ValueError: If `operation` is not a supported comparison.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
| 8 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline."""

    def tearDown(self):
        # Free accelerator/host memory between tests.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # Replicate params/inputs across devices for pmapped inference.
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        # Merge the DPM scheduler's state into the pipeline params.
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 721 |
'''Scrape mobile-app-development job postings for a location from indeed.co.in.'''
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup  # bug fix: the package is ``bs4`` (beautifulsoup4), not ``bsa``

# Base search URL; the location is appended as the ``l=`` query parameter value.
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
SCREAMING_SNAKE_CASE = url  # backward-compatible alias for the old constant name


def lowercase_(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield ``(job_title, company_name)`` tuples scraped from Indeed for *location*.

    The parameter was obfuscated to ``__A`` while the body referenced
    ``location``; the name is restored so the function actually runs.
    """
    soup = BeautifulSoup(requests.get(url + location).content, '''html.parser''')
    # Each organic job posting is wrapped in a <div data-tn-component="organicJob">.
    for job in soup.find_all('''div''', attrs={'''data-tn-component''': '''organicJob'''}):
        job_title = job.find('''a''', attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
        company_name = job.find('''span''', {'''class''': '''company'''}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    # bug fix: the script called an undefined ``fetch_jobs`` -- the generator above is the scraper.
    for i, job in enumerate(lowercase_('Bangalore'), 1):
        print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( __A ):
    """Unit tests for ``PNDMScheduler`` (PRK warm-up steps + PLMS steps).

    NOTE(review): the base class, obfuscated here as ``__A``, is presumably
    ``SchedulerCommonTest`` imported above, which provides ``dummy_sample``,
    ``dummy_sample_deter``, ``dummy_model``, ``check_over_configs`` and
    ``check_over_forward``.  Obfuscation renamed every method to ``A__`` (so
    later definitions shadow earlier ones and the descriptive test names are
    lost) and every local to ``lowercase``, leaving references such as
    ``config``, ``kwargs``, ``sample``, ``residual``, ``scheduler``,
    ``new_scheduler``, ``output``/``new_output``, ``dummy_past_residuals``,
    ``num_inference_steps``, ``model``, ``result_sum`` and ``result_mean``
    unresolved -- restore the original bindings before running.
    """
    # Scheduler classes exercised by the common test harness.
    UpperCamelCase_ = (PNDMScheduler,)
    # Default forward kwargs used by check_over_forward.
    UpperCamelCase_ = (('num_inference_steps', 50),)
    def A__ ( self : Any , **UpperCAmelCase : Any ) -> Optional[Any]:
        '''Return the default scheduler config dict, updated with any keyword overrides.'''
        lowercase : Dict ={
        '''num_train_timesteps''': 1000,
        '''beta_start''': 0.0_0_0_1,
        '''beta_end''': 0.0_2,
        '''beta_schedule''': '''linear''',
        }
        config.update(**UpperCAmelCase )
        return config
    def A__ ( self : Union[str, Any] , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : int ) -> Union[str, Any]:
        '''Save a configured scheduler, reload it, and check step_prk/step_plms outputs match.'''
        lowercase : Dict =dict(self.forward_default_kwargs )
        lowercase : Any =kwargs.pop('''num_inference_steps''' , UpperCAmelCase )
        lowercase : Optional[int] =self.dummy_sample
        lowercase : Union[str, Any] =0.1 * sample
        lowercase : List[str] =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            lowercase : List[Any] =self.get_scheduler_config(**UpperCAmelCase )
            lowercase : Any =scheduler_class(**UpperCAmelCase )
            scheduler.set_timesteps(UpperCAmelCase )
            # copy over dummy past residuals
            lowercase : Optional[int] =dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCAmelCase )
                lowercase : Tuple =scheduler_class.from_pretrained(UpperCAmelCase )
                new_scheduler.set_timesteps(UpperCAmelCase )
                # copy over dummy past residuals
                lowercase : List[Any] =dummy_past_residuals[:]
            lowercase : Dict =scheduler.step_prk(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            lowercase : Any =new_scheduler.step_prk(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            lowercase : Dict =scheduler.step_plms(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            lowercase : Union[str, Any] =new_scheduler.step_plms(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def A__ ( self : Union[str, Any] ) -> Optional[Any]:
        '''Intentionally disabled base-class check (no-op override).'''
        pass
    def A__ ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : List[Any] ) -> str:
        '''Same save/reload round-trip as above, but without passing config overrides.'''
        lowercase : Optional[int] =dict(self.forward_default_kwargs )
        lowercase : Union[str, Any] =kwargs.pop('''num_inference_steps''' , UpperCAmelCase )
        lowercase : int =self.dummy_sample
        lowercase : Dict =0.1 * sample
        lowercase : List[str] =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            lowercase : Dict =self.get_scheduler_config()
            lowercase : str =scheduler_class(**UpperCAmelCase )
            scheduler.set_timesteps(UpperCAmelCase )
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase : Tuple =dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCAmelCase )
                lowercase : Tuple =scheduler_class.from_pretrained(UpperCAmelCase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCAmelCase )
                # copy over dummy past residual (must be after setting timesteps)
                lowercase : Union[str, Any] =dummy_past_residuals[:]
            lowercase : Optional[Any] =scheduler.step_prk(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            lowercase : List[str] =new_scheduler.step_prk(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            lowercase : List[Any] =scheduler.step_plms(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            lowercase : Tuple =new_scheduler.step_plms(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def A__ ( self : Optional[Any] , **UpperCAmelCase : Dict ) -> Tuple:
        '''Run a full 10-step denoising loop (PRK warm-up then PLMS) and return the final sample.'''
        lowercase : Union[str, Any] =self.scheduler_classes[0]
        lowercase : List[str] =self.get_scheduler_config(**UpperCAmelCase )
        lowercase : Optional[int] =scheduler_class(**UpperCAmelCase )
        lowercase : str =10
        lowercase : Tuple =self.dummy_model()
        lowercase : Optional[Any] =self.dummy_sample_deter
        scheduler.set_timesteps(UpperCAmelCase )
        for i, t in enumerate(scheduler.prk_timesteps ):
            lowercase : Any =model(UpperCAmelCase , UpperCAmelCase )
            lowercase : int =scheduler.step_prk(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            lowercase : Optional[Any] =model(UpperCAmelCase , UpperCAmelCase )
            lowercase : str =scheduler.step_plms(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
        return sample
    def A__ ( self : List[Any] ) -> Tuple:
        '''Check step_prk/step_plms preserve the sample shape at consecutive timesteps.'''
        lowercase : Dict =dict(self.forward_default_kwargs )
        lowercase : int =kwargs.pop('''num_inference_steps''' , UpperCAmelCase )
        for scheduler_class in self.scheduler_classes:
            lowercase : List[Any] =self.get_scheduler_config()
            lowercase : List[str] =scheduler_class(**UpperCAmelCase )
            lowercase : List[Any] =self.dummy_sample
            lowercase : Optional[int] =0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCAmelCase , '''set_timesteps''' ):
                scheduler.set_timesteps(UpperCAmelCase )
            elif num_inference_steps is not None and not hasattr(UpperCAmelCase , '''set_timesteps''' ):
                lowercase : Optional[int] =num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase : Any =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            lowercase : Dict =dummy_past_residuals[:]
            lowercase : List[Any] =scheduler.step_prk(UpperCAmelCase , 0 , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            lowercase : Optional[int] =scheduler.step_prk(UpperCAmelCase , 1 , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            lowercase : Optional[int] =scheduler.step_plms(UpperCAmelCase , 0 , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            lowercase : int =scheduler.step_plms(UpperCAmelCase , 1 , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
    def A__ ( self : str ) -> Optional[Any]:
        '''Exercise the scheduler over different num_train_timesteps values.'''
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCAmelCase )
    def A__ ( self : Dict ) -> int:
        '''Exercise steps_offset and pin the exact timestep schedule for offset=1.'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=UpperCAmelCase )
        lowercase : List[Any] =self.scheduler_classes[0]
        lowercase : Optional[Any] =self.get_scheduler_config(steps_offset=1 )
        lowercase : Dict =scheduler_class(**UpperCAmelCase )
        scheduler.set_timesteps(10 )
        # Expected PNDM timesteps: duplicated PRK warm-up steps, then PLMS steps, all shifted by 1.
        assert torch.equal(
        scheduler.timesteps , torch.LongTensor(
        [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def A__ ( self : int ) -> List[Any]:
        '''Exercise different (beta_start, beta_end) pairs.'''
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
    def A__ ( self : int ) -> Optional[Any]:
        '''Exercise both supported beta schedules.'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=UpperCAmelCase )
    def A__ ( self : int ) -> Tuple:
        '''Exercise both prediction types.'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=UpperCAmelCase )
    def A__ ( self : Any ) -> Any:
        '''Exercise the forward pass at several individual timesteps.'''
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=UpperCAmelCase )
    def A__ ( self : Optional[Any] ) -> Any:
        '''Exercise the forward pass with several num_inference_steps values.'''
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=UpperCAmelCase )
    def A__ ( self : int ) -> Tuple:
        '''Regression test: num_inference_steps=27 is not divisible by the PRK cadence.'''
        lowercase : Dict =27
        for scheduler_class in self.scheduler_classes:
            lowercase : Optional[int] =self.dummy_sample
            lowercase : Optional[int] =0.1 * sample
            lowercase : str =self.get_scheduler_config()
            lowercase : int =scheduler_class(**UpperCAmelCase )
            scheduler.set_timesteps(UpperCAmelCase )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                lowercase : Any =scheduler.step_prk(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
    def A__ ( self : int ) -> Optional[int]:
        '''step_plms without set_timesteps must raise (no PLMS state yet).'''
        with self.assertRaises(UpperCAmelCase ):
            lowercase : List[Any] =self.scheduler_classes[0]
            lowercase : Union[str, Any] =self.get_scheduler_config()
            lowercase : Optional[int] =scheduler_class(**UpperCAmelCase )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def A__ ( self : Any ) -> Any:
        '''Full loop with defaults: pin sum/mean of the final sample.'''
        lowercase : Optional[Any] =self.full_loop()
        lowercase : List[str] =torch.sum(torch.abs(UpperCAmelCase ) )
        lowercase : Union[str, Any] =torch.mean(torch.abs(UpperCAmelCase ) )
        assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
    def A__ ( self : str ) -> Optional[int]:
        '''Full loop with v_prediction: pin sum/mean of the final sample.'''
        lowercase : int =self.full_loop(prediction_type='''v_prediction''' )
        lowercase : Optional[int] =torch.sum(torch.abs(UpperCAmelCase ) )
        lowercase : Tuple =torch.mean(torch.abs(UpperCAmelCase ) )
        assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
        assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
    def A__ ( self : List[Any] ) -> Optional[Any]:
        '''Full loop with set_alpha_to_one (obfuscated flag value) and beta_start=0.01.'''
        lowercase : List[Any] =self.full_loop(set_alpha_to_one=UpperCAmelCase , beta_start=0.0_1 )
        lowercase : Optional[int] =torch.sum(torch.abs(UpperCAmelCase ) )
        lowercase : List[str] =torch.mean(torch.abs(UpperCAmelCase ) )
        assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
        assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
    def A__ ( self : Any ) -> Union[str, Any]:
        '''Same as above with the opposite set_alpha_to_one flag (values differ).'''
        lowercase : Tuple =self.full_loop(set_alpha_to_one=UpperCAmelCase , beta_start=0.0_1 )
        lowercase : List[Any] =torch.sum(torch.abs(UpperCAmelCase ) )
        lowercase : Any =torch.mean(torch.abs(UpperCAmelCase ) )
        assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
        assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 700 |
'''Lazy import structure for the M2M100 model family.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps sub-module name -> public names it exports.  Bug fixes versus the
# obfuscated original: the dict was named inconsistently and then clobbered by
# the torch-only export list, and ``_import_structure`` was referenced at the
# bottom without ever being defined.
_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exported when torch is installed.
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers; module names now match the keys above
    # (the original pointed at non-existent ``*_mam_aaa`` modules / ``MaMaaa*`` names).
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy sub-modules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''Project Euler problem 86: cuboids with integer shortest surface paths.'''
from math import sqrt


def lowercase_(__A: int = 1_0_0_0_0_0_0) -> int:
    """Return the least maximum side length ``M`` for which the number of
    axis-aligned cuboids (with all sides <= M) whose shortest surface path
    between opposite corners is an integer first exceeds ``__A``.

    Bug fixes: the original body referenced undefined names ``limit`` and
    ``lowercase__`` (obfuscation artifacts) and carried a dead ``42`` local.
    """
    limit = __A
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        # For the current largest side M, a+b (sum of the two shorter sides)
        # ranges over 2 .. 2*M; the unfolded shortest path is sqrt((a+b)^2 + M^2).
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count pairs (a, b) with a + b == sum_shortest_sides and
                # 1 <= a <= b <= M.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    # bug fix: the original printed an undefined ``solution()``.
    print(f"""{lowercase_() = }""")
| 701 |
'''Lazy import structure for the MEGA model family.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Maps sub-module name -> public names it exports.  Bug fixes versus the
# obfuscated original: the torch-only export list overwrote the dict instead
# of being added to it, and ``_import_structure`` was referenced at the bottom
# without ever being defined.
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exported when torch is installed.
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy sub-modules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
def apply_table(inp: str, table: list) -> str:
    """Permute/select characters of *inp* using the 1-based positions in *table*.

    Bug fixes: the obfuscated version declared two parameters both named
    ``__A`` (a SyntaxError) while the body used ``inp``/``table``, and the
    function name is restored to ``apply_table`` to match its call sites below.
    """
    res = ''''''
    for i in table:
        # Table entries are 1-based bit positions.
        res += inp[i - 1]
    return res
def left_shift(data: str) -> str:
    """Circularly shift the bit-string *data* left by one position.

    Bug fixes: parameter was obfuscated to ``__A`` while the body used the
    undefined name ``data``; renamed to ``left_shift`` to match call sites.
    """
    return data[1:] + data[0]
def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings, returned as a bit string.

    Bug fixes: both parameters were obfuscated to ``__A`` (a SyntaxError)
    while the body used ``a``/``b``; renamed to ``xor`` to match call sites.
    """
    res = ''''''
    for i in range(len(a)):
        # Equal bits XOR to 0, differing bits to 1.
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s: list, data: str) -> str:
    """Look up the 4-bit string *data* in S-box *s*.

    The outer bits (first and last) select the row, the inner two bits the
    column; the selected value is returned as an (unpadded) bit string.

    Bug fixes: both parameters were obfuscated to ``__A`` (a SyntaxError) and
    the body's ``row``/``col``/``s`` names were lost; restored to match the
    call sites in ``function`` below.
    """
    row = int('''0b''' + data[0] + data[-1], 2)
    col = int('''0b''' + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion: list, s0: list, s1: list, key: str, message: str, p4_table=(2, 4, 3, 1)) -> str:
    """One Feistel round of simplified DES (S-DES) on the 8-bit *message*.

    The right half is expanded, XOR-ed with the round *key*, run through the
    two S-boxes, permuted by P4 and XOR-ed into the left half; the (new left
    + unchanged right) halves are returned.

    Bug fixes: all five parameters were obfuscated to ``__A`` (a SyntaxError)
    and the P4 permutation referenced an undefined global; P4 is now a
    backward-compatible keyword parameter whose default matches the table the
    driver script defines (``[2, 4, 3, 1]``).
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    # S-box outputs are unpadded; left-pad each to exactly 2 bits.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input('Enter 10 bit key: ')
    message = input('Enter 8 bit message: ')

    # Permutation / substitution tables.  NOTE(review): names reconstructed
    # from the S-DES algorithm structure -- the obfuscated original assigned
    # every table to the same ``SCREAMING_SNAKE_CASE`` name and then referenced
    # ``paa_table``/``pa_table``/``IP``/``expansion``/``sa``/``keya`` etc.,
    # none of which existed.
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation: P10, split, rotate each half, P8 -> key1; rotate twice
    # more, P8 -> key2.
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption: IP, round with key1, swap halves, round with key2, IP^-1.
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # decryption: identical rounds with the subkeys in reverse order.
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('Plain text after decypting is:', PT)
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

# Bug fixes: the obfuscated original assigned all five module constants to the
# same name ``SCREAMING_SNAKE_CASE`` (each overwriting the last), while the
# tokenizer class below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_INIT_CONFIGURATION and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES -- names restored accordingly.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

# Maximum input sizes per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}

PRETRAINED_INIT_CONFIGURATION = {}
class UpperCAmelCase_(PreTrainedTokenizerFast):
    """Fast (HuggingFace *tokenizers*-backed) tokenizer for HerBERT.

    NOTE(review): the obfuscated original subclassed the undefined name
    ``__A``, assigned every class attribute to the same name
    ``UpperCamelCase_``, named every method ``A__`` (so later defs shadowed
    earlier ones) and declared duplicate ``UpperCAmelCase`` parameters (a
    SyntaxError).  Attribute and method names below are the ones the
    ``PreTrainedTokenizerFast`` API requires.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ) -> None:
        """Forward vocab/merges/tokenizer files and special tokens to the fast base class."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``<s> A </s>`` or ``<s> A </s> B </s>``."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type ids: 0 for the first sequence (+ specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the underlying tokenizer model files to *save_directory* and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 8 | 0 |
'''TrOCR decoder model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Bug fix: the obfuscated original assigned both the logger and this map to
# the same name, clobbering the logger.
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for the TrOCR text decoder.

    NOTE(review): the obfuscated original subclassed the undefined name
    ``_UpperCAmelCase``, assigned all three class attributes to the same
    name and declared every ``__init__`` parameter as ``UpperCAmelCase``
    (a SyntaxError).  Attribute and parameter names are restored from the
    assignments in the body; defaults are kept exactly as written.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    # Map the generic config attribute names onto decoder-specific ones.
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.0_2,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        """Store decoder hyper-parameters and forward the special token ids to the base class."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
    """Tests that transformers works offline (``TRANSFORMERS_OFFLINE=1`` / no network).

    Each test assembles a small python program from three snippets -- ``load``
    (imports), ``run`` (the actual work) and ``mock`` (monkey-patches
    ``socket.socket`` so any network access raises) -- and executes it in a
    subprocess.

    NOTE(review): the base class (obfuscated as ``__A``) is presumably
    ``TestCasePlus`` imported above (it provides ``get_env``).  Obfuscation
    renamed every method to ``A__`` (only the last definition survives on the
    class) and every local to ``lowercase``; lines such as
    ``lowercase : Optional[Any] ='1'`` were presumably
    ``env["TRANSFORMERS_OFFLINE"] = "1"``, and later references to ``load``,
    ``run``, ``mock``, ``cmd``, ``env`` and ``result`` no longer resolve --
    restore the original bindings before running.
    """
    @require_torch
    def A__ ( self : Tuple ) -> Optional[int]:
        '''With files already cached, loading config/model/tokenizer/pipeline must succeed while sockets raise.'''
        lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        '''
        lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        '''
        lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
        '''
        # Force fetching the files so that we can use the cache
        lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(UpperCAmelCase )
        BertModel.from_pretrained(UpperCAmelCase )
        BertTokenizer.from_pretrained(UpperCAmelCase )
        pipeline(task='''fill-mask''' , model=UpperCAmelCase )
        # baseline - just load from_pretrained with normal network
        lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        lowercase : Tuple =self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : Optional[Any] ='''1'''
        lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
    @require_torch
    def A__ ( self : str ) -> List[str]:
        '''Cached loads must also survive flaky-internet socket errors (no offline flag).'''
        lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        '''
        lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        '''
        lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        '''
        # Force fetching the files so that we can use the cache
        lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(UpperCAmelCase )
        BertModel.from_pretrained(UpperCAmelCase )
        BertTokenizer.from_pretrained(UpperCAmelCase )
        pipeline(task='''fill-mask''' , model=UpperCAmelCase )
        # baseline - just load from_pretrained with normal network
        lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        lowercase : str =self.get_env()
        lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
    @require_torch
    def A__ ( self : Any ) -> Optional[Any]:
        '''Sharded checkpoints must load from cache in offline mode.'''
        lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
        '''
        lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        '''
        lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        '''
        # baseline - just load from_pretrained with normal network
        lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        lowercase : Optional[Any] =self.get_env()
        lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # next emulate no network
        lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : Any ='''1'''
        lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
    @require_torch
    def A__ ( self : Tuple ) -> Optional[int]:
        '''pipeline() without an explicit task must fail with a clear error in offline mode.'''
        lowercase : Optional[int] ='''
from transformers import pipeline
        '''
        lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        '''
        lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        '''
        lowercase : Tuple =self.get_env()
        lowercase : Optional[int] ='''1'''
        lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        # Expect a non-zero exit and the specific offline-mode error message.
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
        '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
    @require_torch
    def A__ ( self : int ) -> Optional[int]:
        '''Models with trust_remote_code must load from cache in offline mode.'''
        lowercase : List[str] ='''
from transformers import AutoModel
        '''
        lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        '''
        # baseline - just load from_pretrained with normal network
        lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        lowercase : Optional[Any] =self.get_env()
        lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : List[str] ='''1'''
        lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
| 8 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowercase_(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Build the model-call kwargs for the BlenderbotSmall tests, defaulting
    any mask that was not supplied.

    Bug fixes: every parameter was obfuscated to ``__A`` (a SyntaxError with
    duplicate argument names), and the returned ``"decoder_attention_mask"``
    entry was bound to ``attention_mask`` instead of the decoder mask computed
    two lines above.
    """
    # Attention masks default to "1 wherever the token is not padding".
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    # Head masks default to all-ones (no heads pruned); computed for parity
    # with the torch test helper even though they are not returned below.
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class UpperCAmelCase_ :
    """Builds a tiny random FlaxBlenderbotSmall config plus encoder/decoder
    inputs, and checks the decoder's cached (incremental) forward pass against
    the uncached one.

    NOTE(review): this class looks machine-renamed and cannot run as written:
    ``__init__`` declares every parameter as ``UpperCAmelCase`` (duplicate
    argument names are a SyntaxError); all helper methods are named ``A__`` so
    each later ``def`` rebinds the previous one; and ``__snake_case`` is
    name-mangled by Python to ``_UpperCAmelCase___snake_case``, which is defined
    nowhere.  The original names must be restored before use.
    """
    def __init__( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any]=13 , UpperCAmelCase : Any=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=False , UpperCAmelCase : Optional[int]=99 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : str=2 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : Tuple="gelu" , UpperCAmelCase : str=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : int=2 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : str=0 , UpperCAmelCase : str=0.0_2 , ) -> Optional[Any]:
        '''Record the tiny-model hyper-parameters used by the tests.'''
        # NOTE(review): each ``lowercase : T =value`` below reads like a mangled
        # ``self.<attr> = <param>`` assignment; as written it only rebinds a local.
        lowercase : Optional[Any] =parent
        lowercase : Any =batch_size
        lowercase : str =seq_length
        lowercase : Optional[Any] =is_training
        lowercase : Tuple =use_labels
        lowercase : List[str] =vocab_size
        lowercase : Any =hidden_size
        lowercase : str =num_hidden_layers
        lowercase : Any =num_attention_heads
        lowercase : Tuple =intermediate_size
        lowercase : List[Any] =hidden_act
        lowercase : Tuple =hidden_dropout_prob
        lowercase : Dict =attention_probs_dropout_prob
        lowercase : List[Any] =max_position_embeddings
        lowercase : List[str] =eos_token_id
        lowercase : Tuple =pad_token_id
        lowercase : Tuple =bos_token_id
        lowercase : int =initializer_range
    def A__ ( self : str ) -> Optional[int]:
        '''Create random input ids, a matching config and the inputs dict.'''
        lowercase : str =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        lowercase : Union[str, Any] =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        lowercase : List[Any] =shift_tokens_right(__snake_case , 1 , 2 )
        lowercase : Optional[int] =BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__snake_case , )
        lowercase : Dict =prepare_blenderbot_inputs_dict(__snake_case , __snake_case , __snake_case )
        # NOTE(review): ``config``/``inputs_dict`` are undefined here — the
        # results above were bound to ``lowercase`` instead.
        return config, inputs_dict
    def A__ ( self : Optional[Any] ) -> Dict:
        '''Thin wrapper returning (config, inputs_dict) for the common suite.'''
        lowercase : Optional[int] =self.prepare_config_and_inputs()
        return config, inputs_dict
    def A__ ( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] ) -> List[Any]:
        '''Check incremental decoding with an init'ed cache matches a full pass.'''
        lowercase : Tuple =20
        lowercase : Union[str, Any] =model_class_name(__snake_case )
        lowercase : Optional[Any] =model.encode(inputs_dict['''input_ids'''] )
        lowercase : Dict =(
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        lowercase : Optional[Any] =model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
        lowercase : Union[str, Any] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        lowercase : int =jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        lowercase : Optional[Any] =model.decode(
            decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , )
        lowercase : Optional[int] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        lowercase : str =model.decode(
            decoder_input_ids[:, -1:] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__snake_case , )
        lowercase : Optional[int] =model.decode(__snake_case , __snake_case )
        # Compare a small slice of the last-step logits between the cached and
        # uncached paths.
        lowercase : Tuple =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
    def A__ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> int:
        '''Same cache check, with an explicit (padded) decoder attention mask.'''
        lowercase : List[str] =20
        lowercase : Union[str, Any] =model_class_name(__snake_case )
        lowercase : Any =model.encode(inputs_dict['''input_ids'''] )
        lowercase : Optional[int] =(
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        lowercase : str =jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        lowercase : Optional[int] =model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
        lowercase : Any =jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        lowercase : int =model.decode(
            decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , )
        lowercase : Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        lowercase : int =model.decode(
            decoder_input_ids[:, -1:] , __snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__snake_case , decoder_position_ids=__snake_case , )
        lowercase : str =model.decode(__snake_case , __snake_case , decoder_attention_mask=__snake_case )
        lowercase : Any =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
    """Standalone checks of the FlaxBlenderbotSmall LM head and of
    ``shift_tokens_right``.

    NOTE(review): machine-renamed — all test methods are named ``A__`` so only
    the last survives on the class, bodies read locals through undefined names
    (``input_ids``, ``config``, ``batch_size``, ``summary`` …), and
    ``self._get_config_and_data()`` no longer exists under that name.
    """
    # vocabulary size shared by the hand-written batches below
    UpperCamelCase_ = 99
    def A__ ( self : Optional[Any] ) -> List[str]:
        '''Build a fixed batch of input ids plus a small config.'''
        lowercase : Dict =np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.intaa , )
        lowercase : Union[str, Any] =input_ids.shape[0]
        lowercase : Any =BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def A__ ( self : List[str] ) -> Any:
        '''LM head output has shape (batch, seq_len, vocab_size).'''
        lowercase : List[Any] =self._get_config_and_data()
        lowercase : str =FlaxBlenderbotSmallForConditionalGeneration(__snake_case )
        lowercase : Any =lm_model(input_ids=__snake_case )
        lowercase : Union[str, Any] =(batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , __snake_case )
    def A__ ( self : Optional[Any] ) -> Optional[Any]:
        '''LM head also works when encoder/decoder lengths differ.'''
        lowercase : int =BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lowercase : Optional[int] =FlaxBlenderbotSmallForConditionalGeneration(__snake_case )
        lowercase : List[Any] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        lowercase : List[Any] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        lowercase : List[str] =lm_model(input_ids=__snake_case , decoder_input_ids=__snake_case )
        lowercase : Tuple =(*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , __snake_case )
    def A__ ( self : Dict ) -> int:
        '''shift_tokens_right keeps shape, reduces pads by one and inserts the start token.'''
        lowercase : Optional[Any] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        lowercase : Union[str, Any] =shift_tokens_right(__snake_case , 1 , 2 )
        lowercase : Dict =np.equal(__snake_case , 1 ).astype(np.floataa ).sum()
        lowercase : str =np.equal(__snake_case , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(__snake_case , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase , __SCREAMING_SNAKE_CASE ):
    """Model-level FlaxBlenderbotSmall tests: cache fast-path, jitted
    encode/decode equivalence and a slow ``from_pretrained`` smoke test.

    NOTE(review): the base-class name ``__SCREAMING_SNAKE_CASE`` is name-mangled
    and defined nowhere (the FlaxModelTesterMixin/FlaxGenerationTesterMixin
    imports above were presumably intended); ``FlaxBlenderbotSmallModelTester``
    is not defined in this module (the tester class lost its name); the methods
    named ``A__`` rebind one another; and ``__snake_case`` is an undefined
    mangled identifier.
    """
    UpperCamelCase_ = True
    UpperCamelCase_ = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    UpperCamelCase_ = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def A__ ( self : int ) -> Any:
        '''Attach the model tester (setUp).'''
        lowercase : Optional[int] =FlaxBlenderbotSmallModelTester(self )
    def A__ ( self : Union[str, Any] ) -> str:
        '''Exercise the cached decoder fast path for every model class.'''
        lowercase : Any =self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(__snake_case , __snake_case , __snake_case )
    def A__ ( self : str ) -> Dict:
        '''Same, with an explicit decoder attention mask.'''
        lowercase : Any =self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(__snake_case , __snake_case , __snake_case )
    def A__ ( self : Union[str, Any] ) -> str:
        '''Jitted and eager ``encode`` produce identically shaped outputs.'''
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowercase : Tuple =self._prepare_for_class(__snake_case , __snake_case )
                lowercase : Optional[int] =model_class(__snake_case )
                @jax.jit
                def encode_jitted(UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[Any] ):
                    return model.encode(input_ids=__snake_case , attention_mask=__snake_case )
                with self.subTest('''JIT Enabled''' ):
                    lowercase : Optional[int] =encode_jitted(**__snake_case ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowercase : Any =encode_jitted(**__snake_case ).to_tuple()
                self.assertEqual(len(__snake_case ) , len(__snake_case ) )
                for jitted_output, output in zip(__snake_case , __snake_case ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def A__ ( self : int ) -> List[Any]:
        '''Jitted and eager ``decode`` produce identically shaped outputs.'''
        lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowercase : Tuple =model_class(__snake_case )
                lowercase : Dict =model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                lowercase : Tuple ={
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ):
                    return model.decode(
                        decoder_input_ids=__snake_case , decoder_attention_mask=__snake_case , encoder_outputs=__snake_case , )
                with self.subTest('''JIT Enabled''' ):
                    lowercase : Tuple =decode_jitted(**__snake_case ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowercase : Optional[Any] =decode_jitted(**__snake_case ).to_tuple()
                self.assertEqual(len(__snake_case ) , len(__snake_case ) )
                for jitted_output, output in zip(__snake_case , __snake_case ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def A__ ( self : List[str] ) -> Optional[Any]:
        '''Smoke-test loading the pretrained 90M checkpoint.'''
        for model_class_name in self.all_model_classes:
            lowercase : Optional[Any] =model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            lowercase : Optional[int] =np.ones((1, 1) ) * model.config.eos_token_id
            lowercase : Union[str, Any] =model(__snake_case )
            self.assertIsNotNone(__snake_case )
| 704 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowercase_ ( __A : str ) -> int:
    """Evaluate a fully parenthesised arithmetic expression with Dijkstra's
    two-stack algorithm.

    Only single-digit operands and the operators ``+ - * /`` are supported, and
    every binary operation must be wrapped in parentheses, e.g.
    ``(5 + ((4 * 2) * (2 + 3)))``.

    NOTE(review): the original popped both stacks into a throwaway local named
    ``lowercase`` and then applied the operator to the *equation string*
    (``operators[opr](__A, __A)`` with ``opr`` undefined), so it could never
    compute a result; it also depended on a project-local ``Stack`` where plain
    lists suffice.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: list = []
    operator_stack: list = []
    for token in __A:
        if token.isdigit():
            # RULE 1: operands are pushed onto the operand stack
            operand_stack.append(int(token))
        elif token in operators:
            # RULE 2: operators are pushed onto the operator stack
            operator_stack.append(token)
        elif token == ")":
            # RULE 4: on ')', apply the top operator to the top two operands
            # (second pop is the *left* operand)
            opr = operator_stack.pop()
            num1 = operand_stack.pop()
            num2 = operand_stack.pop()
            operand_stack.append(operators[opr](num2, num1))
        # RULE 3: '(' and whitespace are ignored
    # RULE 5: the value left on top of the operand stack is the result
    return operand_stack[-1]
if __name__ == "__main__":
    # NOTE(review): the original printed via the undefined names ``equation``
    # and ``dijkstras_two_stack_algorithm``; the evaluator defined in this
    # module is ``lowercase_`` and the constant is ``SCREAMING_SNAKE_CASE``.
    SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    print(f"""{SCREAMING_SNAKE_CASE} = {lowercase_(SCREAMING_SNAKE_CASE)}""")
| 8 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowercase_ ( main_process_only : bool = True , *args , **kwargs ):
    """Wrap ``tqdm`` so that, by default, only the main local process renders a
    progress bar.

    Parameters
    ----------
    main_process_only:
        When True (default), the bar is disabled on every process whose
        ``PartialState().local_process_index`` is non-zero.
    *args, **kwargs:
        Forwarded verbatim to ``tqdm.auto.tqdm``.

    Raises
    ------
    ImportError
        If ``tqdm`` is not installed.

    NOTE(review): the original declared all three parameters as ``__A``
    (duplicate argument names are a SyntaxError), read the undefined name
    ``main_process_only`` in the body, and compared ``local_process_index == 0``
    — which would have silenced the bar on the main process instead of the
    workers.
    """
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        # Show the bar only on the main local process (index 0).
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
| 705 |
'''simple docstring'''
import re
def lowercase_ ( __A : str ) -> bool:
    """Return True if ``__A`` is a valid Indian mobile phone number.

    Accepts an optional ``+91`` prefix (optionally followed by a space or
    hyphen), an optional leading ``0`` or ``91``, then ten digits starting with
    7, 8 or 9.

    NOTE(review): the original compiled the pattern into a throwaway local and
    then called ``re.search(__A, __A)`` — searching the phone string against
    itself — and compared against the undefined name ``phone``.
    """
    pattern = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := pattern.search(__A ):
        # The pattern is fully anchored, so a hit always spans the whole string.
        return match.string == __A
    return False
if __name__ == "__main__":
    # NOTE(review): the validator in this module is named ``lowercase_``;
    # ``indian_phone_validator`` was never defined.
    print(lowercase_('+918827897895'))
| 8 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : str ) -> dict:
    """Load a fairseq/metaseq OPT checkpoint from the path ``__A`` and remap its
    state dict to the HuggingFace ``OPTModel`` layout.

    Steps: unwrap the optional ``"model"`` envelope, drop weights the HF model
    does not use, rename projection/layer-norm keys, and split each fused
    ``qkv_proj`` weight into separate ``q_proj``/``k_proj``/``v_proj`` tensors.

    NOTE(review): the original body referenced the function object ``lowercase_``
    everywhere its parameter or loop variables were meant (``torch.load(lowercase_)``,
    ``sd.pop(lowercase_)``, ``torch.split(lowercase_, ...)``), and re-read the
    checkpoint file a second time just to unwrap ``"model"``.
    """
    sd = torch.load(__A , map_location='''cpu''' )
    if "model" in sd.keys():
        # Unwrap in place instead of re-reading the (potentially huge) file.
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    for key in list(sd.keys() ):
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def lowercase_ ( fairseq_path , pytorch_dump_folder_path , config=None , * , _load_checkpoint=lowercase_ ) -> None:
    """Convert the fairseq OPT checkpoint at ``fairseq_path`` into a HuggingFace
    ``OPTModel`` saved under ``pytorch_dump_folder_path``.

    ``config`` may name a pretrained config to load; otherwise a default
    ``OPTConfig`` is used.

    NOTE(review): this ``def`` rebinds the module name ``lowercase_`` and thereby
    shadows the checkpoint loader defined above under the same name.  The
    keyword-only ``_load_checkpoint`` default captures the loader at definition
    time — before the rebinding — so the call below still reaches it.  The
    original declared all three parameters as ``__A`` (duplicate argument names
    are a SyntaxError) and called the undefined name ``load_checkpoint`` with the
    function object itself as argument.
    """
    state_dict = _load_checkpoint(fairseq_path )
    if config is not None:
        opt_config = OPTConfig.from_pretrained(config )
    else:
        opt_config = OPTConfig()
    model = OPTModel(opt_config ).half().eval()
    model.load_state_dict(state_dict )
    # Ensure the output directory exists, then write the converted model.
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and the parsed args to
    # ``SCREAMING_SNAKE_CASE`` but then used the undefined names ``parser`` and
    # ``args``, and called the undefined ``convert_opt_checkpoint`` — the
    # converter defined in this module is ``lowercase_``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    lowercase_(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
    """Builds a tiny random RoFormer config plus inputs and provides
    create-and-check helpers for every TF RoFormer head.

    NOTE(review): machine-renamed and not runnable as written — ``__init__`` and
    every helper declare all parameters as ``UpperCAmelCase`` (duplicate
    argument names are a SyntaxError); the helpers are all named ``A__`` so each
    later ``def`` rebinds the previous one; ``__init__`` ignores its arguments
    and rebinds a local ``lowercase`` instead of setting ``self`` attributes;
    and several ``return``s reference names (``config``, ``input_ids``, …) that
    were lost in the rename.
    """
    def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
        '''Record fixed tiny hyper-parameters (the declared arguments are ignored).'''
        lowercase : Optional[Any] =parent
        lowercase : Tuple =13
        lowercase : Any =7
        lowercase : Union[str, Any] =True
        lowercase : Any =True
        lowercase : Optional[int] =True
        lowercase : List[str] =True
        lowercase : Tuple =99
        lowercase : str =32
        lowercase : Union[str, Any] =2
        lowercase : Dict =4
        lowercase : Union[str, Any] =37
        lowercase : Union[str, Any] ='''gelu'''
        lowercase : Any =0.1
        lowercase : Dict =0.1
        lowercase : Dict =512
        lowercase : List[str] =16
        lowercase : Dict =2
        lowercase : int =0.0_2
        lowercase : List[Any] =3
        lowercase : List[str] =4
        lowercase : Optional[Any] =None
    def A__ ( self : Union[str, Any] ) -> int:
        '''Create random ids/masks/labels and a matching RoFormerConfig.'''
        lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : str =None
        if self.use_input_mask:
            lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : Any =None
        if self.use_token_type_ids:
            lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase : List[Any] =None
        lowercase : List[str] =None
        lowercase : List[str] =None
        if self.use_labels:
            lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
        lowercase : List[Any] =RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
        '''Base model returns (batch, seq_len, hidden_size) hidden states.'''
        lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
        lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowercase : Tuple =[input_ids, input_mask]
        lowercase : str =model(UpperCAmelCase )
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
        '''Causal-LM head returns (batch, seq_len, vocab_size) prediction scores.'''
        lowercase : Dict =True
        lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
        lowercase : Union[str, Any] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
        '''Masked-LM head returns (batch, seq_len, vocab_size) logits.'''
        lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
        lowercase : List[str] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
        '''Sequence-classification head returns (batch, num_labels) logits.'''
        lowercase : Optional[Any] =self.num_labels
        lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
        lowercase : Optional[int] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Optional[Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
        '''Multiple-choice head returns (batch, num_choices) logits.'''
        lowercase : int =self.num_choices
        lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
        lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] ={
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
        '''Token-classification head returns (batch, seq_len, num_labels) logits.'''
        lowercase : List[Any] =self.num_labels
        lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
        lowercase : Tuple ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
        '''QA head returns start/end logits of shape (batch, seq_len).'''
        lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
        lowercase : List[str] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def A__ ( self : List[Any] ) -> Dict:
        '''Repack config and inputs as (config, inputs_dict) for the common suite.'''
        lowercase : Optional[Any] =self.prepare_config_and_inputs()
        # NOTE(review): an annotated assignment cannot target a parenthesised
        # tuple — the unpacking below is a SyntaxError as written, and
        # ``config_and_inputs`` is undefined (the call result was bound to
        # ``lowercase`` above).
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) : Optional[int] =config_and_inputs
        lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
    """Common TF model-test suite wired to the RoFormer model and head classes.

    NOTE(review): the mixin bases are the undefined, name-mangled identifier
    ``__A`` (TFModelTesterMixin/PipelineTesterMixin were presumably intended);
    ``TFRoFormerModelTester`` is not defined in this module (the tester class
    above lost its name); the test methods are all named ``A__`` so only the
    last survives; and several helpers they call (``create_and_check_model``
    etc.) no longer exist under those names on the tester.
    """
    UpperCamelCase_ = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    UpperCamelCase_ = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
        '''Skip the TextGeneration pipeline tests for this model.'''
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def A__ ( self : str ) -> Optional[int]:
        '''Create the model tester and the config tester (setUp).'''
        lowercase : List[Any] =TFRoFormerModelTester(self )
        lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
    def A__ ( self : Optional[Any] ) -> Union[str, Any]:
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def A__ ( self : List[str] ) -> List[str]:
        '''Shape-check the base model.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )
    def A__ ( self : List[Any] ) -> Optional[Any]:
        '''Shape-check the masked-LM head.'''
        lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
    def A__ ( self : str ) -> Optional[int]:
        '''Shape-check the causal-LM head.'''
        lowercase : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
    def A__ ( self : int ) -> Tuple:
        '''Shape-check the multiple-choice head.'''
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
    def A__ ( self : Dict ) -> List[str]:
        '''Shape-check the question-answering head.'''
        lowercase : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
    def A__ ( self : Dict ) -> Any:
        '''Shape-check the sequence-classification head.'''
        lowercase : int =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
    def A__ ( self : List[Any] ) -> Tuple:
        '''Shape-check the token-classification head.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
    @slow
    def A__ ( self : str ) -> str:
        '''Smoke-test loading the pretrained Chinese RoFormer checkpoint.'''
        lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
        self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: masked-LM logits of the pretrained Chinese RoFormer.

    NOTE(review): ``UpperCAmelCase`` is used below as a bare, undefined name
    where the input tensor / expected shape / expected values were meant, so
    the test cannot run as written.
    """
    @slow
    def A__ ( self : Union[str, Any] ) -> Dict:
        '''Compare a 3x3 slice of the pretrained model's logits against hard-coded values.'''
        lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowercase : List[str] =model(UpperCAmelCase )[0]
        # TODO Replace vocab size
        lowercase : Tuple =5_0000
        lowercase : List[str] =[1, 6, vocab_size]
        self.assertEqual(output.shape , UpperCAmelCase )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        lowercase : Dict =tf.constant(
            [
                [
                    [-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
                    [-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
                    [-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit tests for TFRoFormerSinusoidalPositionalEmbedding.

    NOTE(review): both test methods are named ``A__`` (the second rebinds the
    first) and the comparisons pass ``UpperCAmelCase``, an undefined bare name,
    where the actual/expected tensors were meant.
    """
    # absolute tolerance for the float comparisons below
    UpperCamelCase_ = 1e-4
    def A__ ( self : int ) -> List[Any]:
        '''Embedding of the first positions matches the closed-form sin/cos values.'''
        lowercase : Union[str, Any] =tf.constant([[4, 10]] )
        lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        lowercase : Any =emba(input_ids.shape )
        lowercase : List[str] =tf.constant(
            [[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
        tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
    def A__ ( self : Optional[Any] ) -> int:
        '''First rows of a large (512x512) embedding table match precomputed values.'''
        lowercase : Optional[Any] =tf.constant(
            [
                [0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
                [0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
                [0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
            ] )
        lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        lowercase : str =emba.weight[:3, :5]
        tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit test for the rotary position embeddings applied inside
    TFRoFormerSelfAttention.

    NOTE(review): the final comparisons pass ``UpperCAmelCase``, an undefined
    bare name, where the expected tensors were meant, so the test cannot run as
    written.
    """
    # absolute tolerance for the float comparisons below
    UpperCamelCase_ = 1e-4
    def A__ ( self : Dict ) -> Dict:
        '''apply_rotary_position_embeddings rotates query/key layers as expected.'''
        # Deterministic ramp inputs: query is +ramp/100, key is -ramp/100.
        lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
        lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        lowercase : Any =tf.constant(
            [
                [0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
                [-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
                [-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
                [-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
                [0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
                [3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
            ] )
        lowercase : int =tf.constant(
            [
                [0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
                [0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
                [1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
                [2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
                [-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
                [-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
# A path is a list of (y, x) coordinates.
Path = list[tuple[int, int]]

# Bug fix: all three module constants were bound to the same placeholder name,
# while the search classes below reference `grid` and `delta`.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node holding a grid position, the goal position and a parent link.

    Bug fix: the constructor previously declared five parameters with the same
    placeholder name (a SyntaxError) while the body referenced the real names.
    """

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Position is stored (y, x) to match the grid's row-major indexing.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Unidirectional breadth-first search over the module-level `grid`.

    Bug fix: restored the class name used by the `__main__` block and the real
    parameter/argument names (the obfuscated version passed undefined
    `_snake_case` placeholders and duplicated parameter names).
    """

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        # start/goal are given as (y, x); Node takes (x, y) first.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Run BFS; return the path to the target, or [start] if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return walkable neighbour nodes of `parent` (bounds-checked, obstacle-free)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Follow parent links back to the start and return the path start→node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Bidirectional BFS: two simultaneous searches that meet in the middle.

    Bug fix: restored the class name referenced by the `__main__` block and the
    real argument names (the obfuscated version passed undefined `_snake_case`
    placeholders and lost the `self.*.target` assignment targets).
    """

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        """Advance both frontiers one node at a time until they meet."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            # Each search chases the other search's current frontier node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the forward path with the reversed backward path (dropping the shared node)."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # the meeting node appears in both halves
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    # Bug fix: every variable below was bound to the same placeholder name,
    # leaving `init`, `goal`, `bfs`, etc. undefined when referenced.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print('Unidirectional BFS computation time : ', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print('Bidirectional BFS computation time : ', bd_bfs_time)
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into one processor.

    Bug fixes: the base class was the undefined placeholder `__A` (the file imports
    ProcessorMixin for this purpose); `__call__` declared every parameter with the
    same placeholder name (a SyntaxError); `feature_extractor` could be referenced
    unbound in `__init__` when the deprecated kwarg was absent.
    """

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''LayoutLMv2ImageProcessor'''
    tokenizer_class = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None  # robustness: defined even when the kwarg is absent
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (optionally with OCR), then tokenize, and merge the outputs."""
        # verify input: OCR-produced words/boxes conflict with user-supplied ones
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''')

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''')

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''')

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features['''boxes'''],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop('''pixel_values''')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['''overflow_to_sample_mapping'''])
        encoded_inputs['''image'''] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once per overflowed chunk so images stay aligned with input_ids."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''',
            FutureWarning,
        )
        return self.image_processor
| 8 | 0 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into HF format (weights, config, vocab).

    Bug fixes: the function was defined under a placeholder name while the
    `__main__` block calls `convert_xlm_checkpoint_to_pytorch`; all argument
    references were the undefined placeholder `__A`; the vocab-save message
    printed the config path instead of the vocab path.
    """
    # Load checkpoint in CPU memory
    chkpt = torch.load(xlm_checkpoint_path, map_location='''cpu''')

    state_dict = chkpt['''model''']

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt['''params''']
    # Tensors/arrays in params are not JSON-serializable; drop them.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt['''dico_word2id''']
    # Re-apply BPE conventions: words get a `</w>` suffix, subword joiners `@@` are stripped.
    vocab = {s + '''</w>''' if s.find('''@@''') == -1 and i > 13 else s.replace('''@@''', ''''''): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']

    print(F'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(F'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(json.dumps(config, indent=2) + '''\n''')

    print(F'Save vocab file to {pytorch_vocab_dump_path}')
    with open(pytorch_vocab_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(json.dumps(vocab, indent=2) + '''\n''')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 708 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Bug fix: the function was defined under a placeholder name while the
    `__main__` block calls ``solution()``.

    Raises:
        TypeError: if ``n`` cannot be cast to int.
        ValueError: if ``n`` is not positive.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    ans = 1
    i = 2
    # Trial division: strip every prime factor up to sqrt(n); the last one
    # stripped (or the remaining n) is the largest prime factor.
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 8 | 0 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map human-readable resampling names to PIL filter constants.
# Pillow 9.1.0 moved the filter constants into the `Image.Resampling` enum and
# deprecated the module-level aliases, so pick the right namespace per version.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    SCREAMING_SNAKE_CASE = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    # Legacy module-level constants (removed in Pillow 10).
    SCREAMING_SNAKE_CASE = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] (N, C, H, W) to a list of PIL images.

    Bug fixes: the body referenced the undefined placeholder `snake_case_`
    instead of its argument, and the placeholder name `lowercase_` collided
    with the next function definition (shadowing this one entirely).
    """
    # Map [-1, 1] -> [0, 1] and move channels last for PIL.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] (N, H, W, C) to a list of PIL images.

    Bug fix: restored the name this helper is called by (the preceding
    conversion function invokes ``numpy_to_pil``) and the argument references,
    which were undefined placeholders.
    """
    if images.ndim == 3:
        images = images[None, ...]  # promote a single image to a batch of one
    images = (images * 255).round().astype('''uint8''')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='''L''') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u-1) * ... * (u-p+1), the falling-factorial term of Newton's
    forward-difference interpolation formula.

    Bug fix: the function was defined under a placeholder name while `main`
    calls it as ``ucal``.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Interactively compute Newton's forward-difference interpolation.

    Reads n sample points (x values and their y values), builds the forward
    difference table in-place in ``y``, and prints the interpolated value.

    Bug fixes: the function was defined under a placeholder name while the
    `__main__` block calls ``main()``; every assignment target and several
    argument references were undefined placeholders (e.g. the difference-table
    update wrote to a throwaway local instead of ``y[j][i]``).
    """
    n = int(input('''enter the numbers of values: '''))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    # Pre-fill an n x n table with zeros; column 0 gets the sample values below.
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print('''enter the values of parameters in a list: ''')
    x = list(map(float, input().split()))

    print('''enter the values of corresponding parameters: ''')
    for i in range(n):
        y[i][0] = float(input())

    value = int(input('''enter the value to interpolate: '''))
    # Normalized offset u, assuming equally spaced x values.
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(F'the value at {value} is {summ}')


if __name__ == "__main__":
    main()
| 8 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """Config tester that additionally checks the MobileViTV2-specific attribute.

    Bug fixes: restored the class name referenced by the model test's setUp,
    the real base class (ConfigTester, imported above), and the undefined
    `UpperCamelCase__` placeholder which should be the freshly built config.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''width_multiplier'''))
class MobileViTVaModelTester:
    """Builds tiny MobileViTV2 configs and inputs and runs shape checks for each head.

    Bug fixes: restored the class name referenced by the model test's setUp;
    `__init__` declared every parameter with the same placeholder name (a
    SyntaxError); method bodies referenced the undefined `UpperCamelCase__`
    placeholder; `get_config` read `self.ffn_dropout_prob`/`self.attn_dropout_prob`
    while `__init__` stores `ffn_dropout`/`attn_dropout`; all methods shared the
    placeholder name `A__`, so only the last survived.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Final-stage hidden size scales with the width multiplier.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) for a tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileViTV2.

    Bug fixes: restored the class name and mixin bases (the obfuscated version
    inherited from an undefined placeholder); class attributes, which the
    mixins read by name, were all bound to the same placeholder; every method
    shared the placeholder name `A__` (so only the last one survived and none
    were discovered by unittest); bodies referenced the undefined
    `UpperCamelCase__` placeholder.
    """

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': MobileViTVaModel,
            '''image-classification''': MobileViTVaForImageClassification,
            '''image-segmentation''': MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='''MobileViTV2 does not output attentions''')
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats test image used by the integration tests.

    Bug fix: the function was defined under a placeholder name while the
    integration tests below call ``prepare_img()``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against pretrained MobileViTV2 checkpoints.

    Bug fixes: restored the class name; every method shared the placeholder
    name `A__` (so only the last survived and none were discovered by
    unittest); bodies referenced the undefined `UpperCamelCase__` placeholder
    where the device, inputs and expected tensors were meant.
    """

    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''').to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : str ) -> List[str]:
        '''A lone preprocessor_config.json with an explicit image_processor_type loads directly.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : int ) -> List[str]:
        '''A non-existent repo id must raise with a clear "not a valid model identifier" message.'''
        with self.assertRaisesRegex(
            UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
            lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
    def A__ ( self : List[Any] ) -> List[str]:
        '''An invalid revision must raise with a "not a valid git identifier" message.'''
        with self.assertRaisesRegex(
            UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
    def A__ ( self : Tuple ) -> Optional[int]:
        '''A repo without a preprocessor_config.json must raise a descriptive error.'''
        with self.assertRaisesRegex(
            UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def A__ ( self : List[str] ) -> str:
        '''Remote-code image processors load only with trust_remote_code=True, and survive a save/reload round trip.'''
        # NOTE(review): the trust_remote_code arguments below were obfuscated to
        # 'UpperCAmelCase'; upstream they are False (raise) then True (load).
        with self.assertRaises(UpperCAmelCase ):
            lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCAmelCase ):
            lowercase : List[str] =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(UpperCAmelCase )
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
    def A__ ( self : Union[str, Any] ) -> Dict:
        '''Registering a custom config/image-processor pair makes it usable through the auto API; registry is cleaned up in finally.'''
        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCAmelCase ):
                AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
                lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
                lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(UpperCAmelCase )
                    lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
                    self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        finally:
            # Undo the registration so other tests see a pristine registry.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def A__ ( self : Any ) -> Any:
        '''When a local class is registered for a repo that also ships remote code, trust_remote_code decides which wins.'''

        class UpperCAmelCase_ ( __A ):
            """Local stand-in image processor; the is_local flag distinguishes it from the Hub version."""

            UpperCamelCase_ = True

        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # If remote code is not set, the default is to use local
            lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            lowercase : Tuple =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            lowercase : Dict =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
        finally:
            # Undo the registration so other tests see a pristine registry.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 0 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class UpperCAmelCase_ ( lowerCAmelCase__ ):
    """Random-length IterableDataset: yields 0,1,2,... and stops with probability p_stop per step or at max_length."""

    def __init__( self : str , UpperCAmelCase : List[str]=0.0_1 , UpperCAmelCase : str=1000 ) -> List[Any]:
        '''Store the per-step stop probability and the hard length cap.'''
        # NOTE(review): 'p_stop' / 'max_length' are unbound here — the parameters were
        # obfuscated to 'UpperCAmelCase' and the attribute targets to 'lowercase';
        # upstream this is self.p_stop / self.max_length.
        lowercase : Any =p_stop
        lowercase : int =max_length

    def __iter__( self : List[str] ) -> Optional[Any]:
        '''Yield consecutive integers until a random stop or max_length is reached.'''
        lowercase : Optional[Any] =0
        lowercase : Dict =False
        while not stop and count < self.max_length:
            yield count
            count += 1
            lowercase : List[Any] =random.random() < self.p_stop
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str=False , UpperCAmelCase : Any=True ) -> Tuple:
        '''Shard a batch sampler across 2 fake processes and compare each shard's batches to the expected lists.'''
        # NOTE(review): '_lowerCamelCase', 'batch_sampler_shards', 'split_batches' and
        # 'expected' are obfuscation remnants; see the upstream accelerate test.
        lowercase : Tuple =[
            BatchSamplerShard(_lowerCamelCase , 2 , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
            for i in range(2 )
        ]
        lowercase : Tuple =[list(_lowerCamelCase ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Without split_batches each shard must contain the expected number of whole batches.
            self.assertListEqual([len(_lowerCamelCase ) for shard in batch_sampler_shards] , [len(_lowerCamelCase ) for e in expected] )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
    def A__ ( self : int ) -> Dict:
        '''Default sharding (no split_batches, even_batches=True): shards wrap around to pad incomplete final batches.'''
        lowercase : str =BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : int =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        lowercase : Optional[Any] =BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        lowercase : List[str] =BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : Optional[int] =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        lowercase : str =BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : List[Any] =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        lowercase : Dict =BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : List[str] =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        lowercase : Dict =BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : int =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        lowercase : List[Any] =BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : List[Any] =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        lowercase : List[str] =BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : List[str] =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        # Check the shards when the dataset is very small.
        lowercase : str =BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : str =[[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
        lowercase : Any =BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : str =[[], []]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase )
'''simple docstring'''
lowercase : List[Any] =BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
lowercase : Union[str, Any] =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
lowercase : Tuple =BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase : Dict =BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
lowercase : Any =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
lowercase : List[str] =BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
lowercase : str =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase : List[str] =BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
lowercase : Any =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
lowercase : Tuple =BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
lowercase : List[str] =[
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
# Check the shards when the dataset is very small.
lowercase : Optional[Any] =BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
lowercase : List[str] =[[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
lowercase : Union[str, Any] =BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
lowercase : Dict =[[], []]
self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase )
    def A__ ( self : Dict ) -> Dict:
        '''even_batches=False: shards may end up with different numbers of batches instead of wrapping around.'''
        lowercase : List[Any] =BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : Optional[int] =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        lowercase : Optional[int] =BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowerCamelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        lowercase : List[Any] =BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : int =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        lowercase : Tuple =BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : Dict =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        lowercase : Optional[Any] =BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : Union[str, Any] =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        lowercase : int =BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : Any =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        lowercase : Any =BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : Union[str, Any] =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        lowercase : List[Any] =BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : Any =[
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        # Check the shards when the dataset is very small.
        lowercase : int =BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : List[Any] =[[[0, 1]], []]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
        lowercase : Optional[Any] =BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowerCamelCase )
        lowercase : Any =[[], []]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , even_batches=_lowerCamelCase )
    def A__ ( self : Optional[int] ) -> Union[str, Any]:
        '''split_batches=True together with even_batches=False: half-batches per process, no wrap-around padding.'''
        lowercase : List[str] =BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
        lowercase : str =[
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
        lowercase : Tuple =BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowerCamelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size.
        lowercase : str =BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
        lowercase : Tuple =[
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
        lowercase : Dict =BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowerCamelCase )
        lowercase : Optional[Any] =[
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        lowercase : List[Any] =BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
        lowercase : Optional[Any] =[
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
        lowercase : Tuple =BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowerCamelCase )
        lowercase : Dict =[
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
        # Check the shards when the dataset is very small.
        lowercase : int =BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
        lowercase : str =[[[0, 1]], []]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
        lowercase : Optional[int] =BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowerCamelCase )
        lowercase : Optional[Any] =[[], []]
        self.check_batch_sampler_shards(_lowerCamelCase , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
    def A__ ( self : List[str] ) -> Dict:
        '''Sharding a hand-written batch sampler with uneven batch sizes distributes whole batches round-robin.'''
        lowercase : List[Any] =[[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        lowercase : Optional[Any] =[BatchSamplerShard(_lowerCamelCase , 2 , _lowerCamelCase , even_batches=_lowerCamelCase ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def A__ ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Dict=False ) -> str:
        '''Shard an iterable dataset across num_processes and verify the shards interleave back into the reference order.'''
        # NOTE(review): parameter/local names were obfuscated; '_lowerCamelCase' stands in
        # for dataset/seed/batch_size/etc. — see the upstream accelerate test.
        random.seed(_lowerCamelCase )
        lowercase : Any =list(_lowerCamelCase )
        lowercase : Tuple =[
            IterableDatasetShard(
                _lowerCamelCase , batch_size=_lowerCamelCase , drop_last=_lowerCamelCase , num_processes=_lowerCamelCase , process_index=_lowerCamelCase , split_batches=_lowerCamelCase , )
            for i in range(_lowerCamelCase )
        ]
        lowercase : str =[]
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(_lowerCamelCase )
            iterable_dataset_lists.append(list(_lowerCamelCase ) )
        lowercase : Optional[Any] =batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        lowercase : List[Any] =iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
        self.assertTrue(len(_lowerCamelCase ) % shard_batch_size == 0 )
        lowercase : Dict =[]
        for idx in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            # Without drop_last the shards wrap around, so extend the reference accordingly.
            while len(_lowerCamelCase ) < len(_lowerCamelCase ):
                reference += reference
        self.assertListEqual(_lowerCamelCase , reference[: len(_lowerCamelCase )] )
    def A__ ( self : str ) -> Optional[Any]:
        '''Exercise IterableDatasetShard for all four combinations of drop_last/split_batches, plus a tiny dataset.'''
        lowercase : Optional[int] =42
        lowercase : int =RandomIterableDataset()
        self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
        self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
        self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
        self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
        # Edge case with a very small dataset
        lowercase : List[str] =RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
        self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
        self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
        self.check_iterable_dataset_shards(_lowerCamelCase , _lowerCamelCase , batch_size=4 , drop_last=_lowerCamelCase , split_batches=_lowerCamelCase )
    def A__ ( self : int ) -> List[str]:
        '''SkipBatchSampler drops the first N batches of the wrapped sampler.'''
        lowercase : str =BatchSampler(range(16 ) , batch_size=4 , drop_last=_lowerCamelCase )
        lowercase : List[str] =SkipBatchSampler(_lowerCamelCase , 2 )
        self.assertListEqual(list(_lowerCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def A__ ( self : Optional[int] ) -> Tuple:
        '''SkipDataLoader yields only the batches after skip_batches.'''
        lowercase : Tuple =SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def A__ ( self : Tuple ) -> Optional[Any]:
        '''skip_first_batches wraps an existing DataLoader so iteration starts after num_batches.'''
        lowercase : List[str] =DataLoader(list(range(16 ) ) , batch_size=4 )
        lowercase : Any =skip_first_batches(_lowerCamelCase , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def A__ ( self : int ) -> Optional[Any]:
        '''DataLoaderShard sets end_of_dataloader exactly on the last batch, on every epoch.'''
        lowercase : Optional[Any] =DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(_lowerCamelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(_lowerCamelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
    def A__ ( self : Optional[int] ) -> str:
        '''DataLoaderDispatcher also flags end_of_dataloader on the last batch; Accelerator() sets up the needed state.'''
        Accelerator()
        lowercase : int =DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(_lowerCamelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(_lowerCamelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 711 |
"""Solve Coulomb's law for whichever of force, charge1, charge2 or distance is unknown."""
from __future__ import annotations

# Coulomb's constant k in N * m^2 * C^-2 (the original "m^s" was a typo).
SCREAMING_SNAKE_CASE = 8.988E9
# The formula below reads more naturally with a named constant; keep the old
# module-level name as an alias for backward compatibility.
COULOMBS_CONSTANT = SCREAMING_SNAKE_CASE


def lowercase_(force: float, chargea: float, chargeb: float, distance: float) -> dict[str, float]:
    """Apply Coulomb's law F = k * |q1 * q2| / d**2 and solve for the unknown.

    Exactly one of the four arguments must be 0; that quantity is computed
    from the remaining three and returned in a one-entry dict keyed by
    ``"force"``, ``"charge1"``, ``"charge2"`` or ``"distance"``.

    Raises:
        ValueError: if not exactly one argument is 0, or if distance is negative.

    >>> lowercase_(0, 3, 5, 2000)
    {'force': 33705.0}
    """
    charge_product = abs(chargea * chargeb)
    if (force, chargea, chargeb, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if distance < 0:
        raise ValueError('''Distance cannot be negative''')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif chargea == 0:
        chargea = abs(force) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        chargeb = abs(force) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    # Unreachable given the count(0) check above, kept as a safety net.
    raise ValueError('''Exactly one argument must be 0''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class UpperCAmelCase_ ( _A ):
    """Configuration for the BertAbs summarization model (encoder + decoder hyper-parameters).

    The original obfuscated version was unrunnable: every parameter was named
    ``UpperCAmelCase`` (duplicate argument names are a SyntaxError) and the body
    assigned to a throwaway local instead of ``self``. Restored to the canonical
    form; extra keyword arguments are forwarded to the PretrainedConfig base.
    """

    UpperCamelCase_ = '''bertabs'''

    def __init__(
        self,
        vocab_size=3_0522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        """Store the encoder/decoder sizes and dropout rates on the config instance."""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_() -> dict:
    """Return a bijective map from every byte value (0-255) to a printable unicode character.

    Printable latin-1 byte values map to themselves; the remaining (control /
    whitespace) bytes are mapped to code points starting at 256 so byte-level
    BPE never has to merge across whitespace-looking symbols.

    The obfuscated original was unrunnable: ``cs``/``n`` were read but never
    bound, ``bs.append(__A)`` referenced an undefined name, and the ``-> Any``
    annotation raised NameError (``Any`` is not imported in this module).
    """
    printable = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    codepoints = printable[:]
    offset = 0
    for byte in range(2**8):
        if byte not in printable:
            printable.append(byte)
            codepoints.append(2**8 + offset)
            offset += 1
    chars = [chr(cp) for cp in codepoints]
    return dict(zip(printable, chars))
def lowercase_(__A: str) -> set:
    """Return the set of adjacent symbol pairs in a word.

    The word is a sequence of symbols (characters, or previously merged BPE
    tokens); each consecutive pair ``(word[i], word[i+1])`` appears once.

    The obfuscated original was unrunnable: ``pairs``/``word``/``prev_char``
    were read but never bound, and the ``-> List[Any]`` annotation raised
    NameError (``Any`` is not imported in this module).
    """
    pairs = set()
    previous = __A[0]
    for symbol in __A[1:]:
        pairs.add((previous, symbol))
        previous = symbol
    return pairs
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
        '''Load the vocab/merges files and build the byte-level BPE tables (encoder/decoder, byte maps, bpe_ranks).'''
        # NOTE(review): this block is machine-obfuscated — all parameters share the
        # name 'UpperCAmelCase' (a SyntaxError) and the attribute targets became a
        # throwaway 'lowercase' local; upstream the parameters are vocab_file,
        # merges_file, errors, bos/eos/sep/cls/unk/pad/mask tokens and
        # add_prefix_space, and the assignments set self.encoder, self.decoder, etc.
        lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
        lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
        lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
        lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
        lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
        lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
        super().__init__(
            errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
        with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
            lowercase : str =json.load(UpperCAmelCase )
        lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
        lowercase : Optional[int] =errors  # how to handle errors in decoding
        lowercase : Tuple =bytes_to_unicode()
        lowercase : int ={v: k for k, v in self.byte_encoder.items()}
        with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
            lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
        lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
        lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
        lowercase : Optional[int] ={}
        lowercase : Any =add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def A__ ( self : Union[str, Any] ) -> Optional[int]:
        '''Size of the base vocabulary (added tokens excluded).'''
        return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
        '''Apply byte-pair-encoding merges to one word: repeatedly merge the highest-ranked adjacent pair, with caching.'''
        # NOTE(review): obfuscation broke this method — 'word', 'pairs', 'first',
        # 'second', 'new_word', 'i' are read but only 'lowercase' is ever assigned,
        # and the tuple-unpacking line below ('lowercase , lowercase : Optional[int] =bigram')
        # is not valid Python (annotated assignment cannot have a tuple target).
        # Compare with the upstream GPT-2/BART bpe() implementation.
        if token in self.cache:
            return self.cache[token]
        lowercase : List[str] =tuple(UpperCAmelCase )
        lowercase : List[str] =get_pairs(UpperCAmelCase )
        if not pairs:
            return token
        while True:
            # Pick the adjacent pair with the lowest merge rank (earliest learned merge).
            lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowercase , lowercase : Optional[int] =bigram
            lowercase : Union[str, Any] =[]
            lowercase : Optional[Any] =0
            while i < len(UpperCAmelCase ):
                try:
                    lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowercase : Optional[int] =j
                if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowercase : List[str] =tuple(UpperCAmelCase )
            lowercase : str =new_word
            if len(UpperCAmelCase ) == 1:
                break
            else:
                lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
        lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
        lowercase : Union[str, Any] =word
        return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 8 | 0 |
'''simple docstring'''
import math
def lowercase_(initial_intensity: float, angle: float) -> float:
    """Malus's law: intensity of polarized light after a polarizer at `angle` degrees.

    Raises ValueError for a negative intensity or an angle outside [0, 360].
    The obfuscated original declared two parameters both named `__A`
    (SyntaxError) and referenced an undefined `a__`.
    """
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod(name='malus_law')
| 713 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_:
    """Fast polynomial multiplication via an iterative radix-2 FFT.

    Coefficients are given lowest degree first; `self.product` holds the
    coefficients of polyA * polyB. Fixes vs. the obfuscated original:
    instance attributes were never assigned, `np.loga` does not exist
    (should be `np.log2`), `__init__` had duplicate parameter names, and
    `__str__` swapped enumerate's (index, value) pair. The primitive root
    of unity is computed with numpy instead of mpmath (same value, one
    fewer third-party dependency; the module-level mpmath import is now
    unused).

    NOTE(review): an all-zero input polynomial still raises IndexError in
    the leading-zero strip, as in the original — confirm intended inputs.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Copy inputs so we never mutate caller lists.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex primitive root of unity e^(2*pi*i / n) used for the fourier transform
        self.root = complex(np.exp(2j * np.pi / self.c_max_length))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Discrete Fourier transform of polyA ('A') or polyB (anything else)."""
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs, then invert the transform."""
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack, rounding away FFT noise at 8 decimals.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable form of A, B and their product."""
        a = 'A = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product))

        return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
# Result record for the scraper; the functions below reference it as `covid_data`,
# so the obfuscated `SCREAMING_SNAKE_CASE` binding left it undefined.
covid_data = namedtuple('covid_data', 'cases deaths recovered')
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape worldwide COVID-19 cases/deaths/recovered counts from worldometers.

    Restored name `covid_stats` (the print at module bottom calls it by that
    name); the obfuscated body referenced an undefined `UpperCamelCase__` and
    never used the xpath string it built.
    """
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
# Report template; the print below reads it as `fmt`, which the obfuscated
# `SCREAMING_SNAKE_CASE` binding left undefined.
fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 714 |
'''simple docstring'''
def lowercase_(number: int, shift_amount: int) -> str:
    """Logical left shift: append `shift_amount` zero bits, returned as a '0b...' string.

    Raises ValueError for negative inputs. The obfuscated original declared
    two parameters both named `__A` (SyntaxError) and never bound
    `binary_number`.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''')

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def lowercase_(number: int, shift_amount: int) -> str:
    """Logical right shift: drop the low `shift_amount` bits, returned as '0b...'.

    Shifting past the width yields "0b0". Raises ValueError for negative
    inputs. Fixes duplicate `__A` parameters and unbound locals.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''')

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def lowercase_(number: int, shift_amount: int) -> str:
    """Arithmetic right shift on a two's-complement bit string ('0b' prefixed).

    The sign bit is replicated into the vacated positions. Fixes duplicate
    `__A` parameters and unbound locals in the obfuscated original.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number)).strip('-')[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : list[str] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =[]
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(__SCREAMING_SNAKE_CASE )
self.set_fail_transitions()
def A__ ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str ) -> str:
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def A__ ( self : List[str] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : List[Any] =0
for character in keyword:
lowercase : List[str] =self.find_next_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
lowercase : Dict =len(self.adlist ) - 1
else:
lowercase : Any =next_state
self.adlist[current_state]["output"].append(__SCREAMING_SNAKE_CASE )
def A__ ( self : Dict ) -> int:
'''simple docstring'''
lowercase : str =deque()
for node in self.adlist[0]["next_states"]:
q.append(__SCREAMING_SNAKE_CASE )
lowercase : List[Any] =0
while q:
lowercase : List[Any] =q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(__SCREAMING_SNAKE_CASE )
lowercase : List[Any] =self.adlist[r]['''fail_state''']
while (
self.find_next_state(__SCREAMING_SNAKE_CASE , self.adlist[child]['''value'''] ) is None
and state != 0
):
lowercase : int =self.adlist[state]['''fail_state''']
lowercase : Optional[int] =self.find_next_state(
__SCREAMING_SNAKE_CASE , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
lowercase : Optional[int] =0
lowercase : List[str] =(
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def A__ ( self : Optional[Any] , UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] ={} # returns a dict with keywords and list of its occurrences
lowercase : str =0
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
while (
self.find_next_state(__SCREAMING_SNAKE_CASE , string[i] ) is None
and current_state != 0
):
lowercase : Optional[int] =self.adlist[current_state]['''fail_state''']
lowercase : Any =self.find_next_state(__SCREAMING_SNAKE_CASE , string[i] )
if next_state is None:
lowercase : Optional[Any] =0
else:
lowercase : Union[str, Any] =next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
lowercase : Any =[]
result[key].append(i - len(__SCREAMING_SNAKE_CASE ) + 1 )
return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 715 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
pass
def lowercase_(__A: Union[str, Any]) -> List[Any]:
    """Fallback image loader used when vision support is missing: always None."""
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# The test class reads this as `INVOICE_URL`; the obfuscated
# `SCREAMING_SNAKE_CASE` binding left that name undefined.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_(unittest.TestCase):
    """Pipeline tests for the `document-question-answering` task.

    De-obfuscated: every method was named `A__` (so unittest discovered
    nothing and later defs shadowed earlier ones), several signatures
    declared duplicate `UpperCAmelCase` parameters (SyntaxError), and the
    locals `dqa_pipeline`/`image`/`question`/`outputs`/... were never bound.
    The `require_detectrona` decorator name is kept as imported at the top
    of this file.
    """

    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, image_processor):
        """Build a pipeline plus the three example input shapes the common test exercises."""
        dqa_pipeline = pipeline(
            'document-question-answering', model=model, tokenizer=tokenizer, image_processor=image_processor)

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))
        question = 'What is the placebo?'
        examples = [
            {
                'image': load_image(image),
                'question': question,
            },
            {
                'image': image,
                'question': question,
            },
            {
                'image': image,
                'question': question,
                'word_boxes': word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        """Structural smoke test: two answers per example, with typed fields."""
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)},
                    {'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)},
                ]
            ]
            * 3, )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_small_model_pt(self):
        """Tiny LayoutLMv2 checkpoint: exact expected answers plus empty-image behavior."""
        dqa_pipeline = pipeline('document-question-answering', model='hf-internal-testing/tiny-random-layoutlmv2')
        image = INVOICE_URL
        question = 'How many cats are there?'

        expected_output = [
            {'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
            {'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt(self):
        """Full LayoutLMv2 DocVQA checkpoint on the pinned invoice image."""
        dqa_pipeline = pipeline(
            'document-question-answering', model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa', revision='9977165', )
        image = INVOICE_URL
        question = 'What is the invoice number?'

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
                ],
            ]
            * 2, )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        """Same checkpoint with max_seq_len=50 to force chunked inference."""
        dqa_pipeline = pipeline(
            'document-question-answering', model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa', revision='9977165', max_seq_len=50, )
        image = INVOICE_URL
        question = 'What is the invoice number?'

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                    {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
                ]
            ]
            * 2, )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        """LayoutLM (v1) document-QA checkpoint, including word_boxes-only input."""
        tokenizer = AutoTokenizer.from_pretrained(
            'impira/layoutlm-document-qa', revision='3dc6de3', add_prefix_space=True)
        dqa_pipeline = pipeline(
            'document-question-answering', model='impira/layoutlm-document-qa', tokenizer=tokenizer, revision='3dc6de3', )
        image = INVOICE_URL
        question = 'What is the invoice number?'

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ], )

        outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ], )

        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
                ]
            ]
            * 2, )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ], )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        """LayoutLM (v1) checkpoint with max_seq_len=50 to force chunking."""
        tokenizer = AutoTokenizer.from_pretrained(
            'impira/layoutlm-document-qa', revision='3dc6de3', add_prefix_space=True)
        dqa_pipeline = pipeline(
            'document-question-answering', model='impira/layoutlm-document-qa', tokenizer=tokenizer, revision='3dc6de3', max_seq_len=50, )
        image = INVOICE_URL
        question = 'What is the invoice number?'

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
                ]
            ]
            * 2, )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        """Donut (generative) document-QA checkpoint: answer-only output."""
        dqa_pipeline = pipeline(
            'document-question-answering', model='naver-clova-ix/donut-base-finetuned-docvqa', tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa'), feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa', )

        image = INVOICE_URL
        question = 'What is the invoice number?'
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{'answer': 'us-001'}])

    @require_tf
    @unittest.skip('Document question answering not implemented in TF')
    def test_small_model_tf(self):
        pass
| 8 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
# The functions below reference these module globals by their real names
# (TRANSFORMERS_PATH, transformers_module, _re_*_models,
# PIPELINE_TAGS_AND_AUTO_MODELS); the obfuscated version rebound
# `SCREAMING_SNAKE_CASE` repeatedly, leaving all of them undefined.
TRANSFORMERS_PATH = 'src/transformers'


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
    ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
    ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
    ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
    ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
    ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
    ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
    ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
    ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
    (
        'zero-shot-object-detection',
        'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
        'AutoModelForZeroShotObjectDetection',
    ),
    ('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
    ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
    ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
    ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
    (
        'table-question-answering',
        'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForTableQuestionAnswering',
    ),
    ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
    ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
    (
        'next-sentence-prediction',
        'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
        'AutoModelForNextSentencePrediction',
    ),
    (
        'audio-frame-classification',
        'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
        'AutoModelForAudioFrameClassification',
    ),
    ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
    (
        'document-question-answering',
        'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForDocumentQuestionAnswering',
    ),
    (
        'visual-question-answering',
        'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForVisualQuestionAnswering',
    ),
    # NOTE(review): 'MODEL_FOR_FOR_VISION_2_SEQ...' (double FOR) looks like a typo;
    # the hasattr() guard in update_pipeline_and_auto_class_table silently skips
    # unknown mapping names, so image-to-text may never be tagged — confirm upstream.
    ('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
    (
        'zero-shot-image-classification',
        'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
        'AutoModelForZeroShotImageClassification',
    ),
    ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
    ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
    ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier: str) -> list:
    """Split a CamelCase identifier into its word pieces.

    Restored name `camel_case_split` — get_frameworks_table calls it by that
    name; the obfuscated body also referenced an unbound `matches`.
    """
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table() -> "pd.DataFrame":
    """Build a DataFrame flagging PT/TF/Flax support and the processor class per model type.

    Restored name `get_frameworks_table` (called by that name below); the
    obfuscated version never bound any of its locals.
    """
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config', ''): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'

    data['processor'] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table: dict) -> dict:
    """
    Update the table mapping model class name -> (pipeline tag, auto class) by
    scanning the PT/TF/Flax auto-modeling mappings declared in
    `PIPELINE_TAGS_AND_AUTO_MODELS`.

    Args:
        table: Existing mapping of `model_class -> (pipeline_tag, auto_class)`;
            updated in place and also returned.

    Returns:
        The updated `table`.
    """
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}']
        auto_classes = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    # Some mapping values are tuples/lists of class names.
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """
    Regenerate and push the `frameworks.json` / `pipeline_tags.json` metadata
    files to the `huggingface/transformers-metadata` dataset repo.

    Args:
        token: Hub token with write access to `huggingface/transformers-metadata`.
        commit_sha: Optional sha of the transformers commit triggering this
            update; used in the commit message when provided.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    # Start from the pipeline-tag table currently on the Hub, then refresh it.
    resolved_tags_file = hf_hub_download(
        '''huggingface/transformers-metadata''', '''pipeline_tags.json''', repo_type='''dataset''', token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, '''frameworks.json'''))
        tags_dataset.to_json(os.path.join(tmp_dir, '''pipeline_tags.json'''))

        if commit_sha is not None:
            commit_message = (
                f'Update with commit {commit_sha}\n\nSee: '
                f'https://github.com/huggingface/transformers/commit/{commit_sha}'
            )
        else:
            commit_message = 'Update'

        upload_folder(
            repo_id='''huggingface/transformers-metadata''',
            folder_path=tmp_dir,
            repo_type='''dataset''',
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """
    Check that every pipeline task declared in `transformers.pipelines.SUPPORTED_TASKS`
    is covered by the `PIPELINE_TAGS_AND_AUTO_MODELS` constant, raising otherwise.

    Raises:
        ValueError: If one or more pipeline tags are missing from
            `PIPELINE_TAGS_AND_AUTO_MODELS`.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            # The tag may still be covered under its auto-class name.
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ', '.join(missing)
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            f'`utils/update_metadata.py`: {msg}. Please add them!'
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
    parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
    parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 716 |
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal (fractional) part of *number*, keeping its sign.

    Args:
        number: Value whose fractional part is wanted; may be negative.
        digit_amount: If > 0, round the fractional part to this many digits;
            otherwise return it un-rounded.

    Returns:
        The fractional part of *number* (e.g. ``decimal_isolate(35.345, 3) == 0.345``).
    """
    # int() truncates toward zero, so the sign of the fraction matches the input.
    fractional = number - int(number)
    if digit_amount > 0:
        return round(fractional, digit_amount)
    return fractional
if __name__ == "__main__":
    # Demonstrate decimal_isolate on a handful of sample inputs.
    sample_cases = [
        (1.53, 0),
        (35.345, 1),
        (35.345, 2),
        (35.345, 3),
        (-14.789, 3),
        (0, 2),
        (-14.123, 1),
        (-14.123, 2),
        (-14.123, 3),
    ]
    for value, digits in sample_cases:
        print(decimal_isolate(value, digits))
| 8 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.