| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81–54k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
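# Editor's note: a minimal, self-contained sketch of the lazy-import idea used
# above (illustrative only; the real implementation is transformers.utils._LazyModule,
# and the class name below is hypothetical). Attribute access triggers the import:
#
#     import importlib
#     import types
#
#     class DemoLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for module_name, symbols in self._import_structure.items():
#                 if attr in symbols:
#                     module = importlib.import_module(f".{module_name}", self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")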
| 694 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 694 | 1 |
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize the result
    answer = []

    # Traverse the denominations from largest to smallest
    for denomination in reversed(denominations):
        # Use the current denomination as many times as possible
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # append to the "answer" list
    return answer
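# Editor's note: a quick worked example of the greedy strategy above.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]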
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (y/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print the result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
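# Editor's note: a minimal usage sketch of the config above (defaults mirror the
# __init__ signature; overrides work like any PretrainedConfig):
#
#     from transformers import MarkupLMConfig
#
#     config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#     assert config.model_type == "markuplm"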
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = None
if self.use_input_mask:
snake_case__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = None
if self.use_token_type_ids:
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
snake_case__ : List[str] = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Dict = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[int] = TFDebertaVaModel(config=lowerCamelCase )
snake_case__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case__ : Tuple = [input_ids, input_mask]
snake_case__ : List[str] = model(lowerCamelCase )
snake_case__ : str = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
"""simple docstring"""
snake_case__ : Tuple = TFDebertaVaForMaskedLM(config=lowerCamelCase )
snake_case__ : Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case__ : Optional[Any] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Tuple = self.num_labels
snake_case__ : Tuple = TFDebertaVaForSequenceClassification(config=lowerCamelCase )
snake_case__ : Optional[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case__ : str = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : Optional[int] = self.num_labels
snake_case__ : int = TFDebertaVaForTokenClassification(config=lowerCamelCase )
snake_case__ : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case__ : int = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[int] = TFDebertaVaForQuestionAnswering(config=lowerCamelCase )
snake_case__ : Tuple = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case__ : Dict = model(lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : List[str] = self.prepare_config_and_inputs()
(
(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,
) : int = config_and_inputs
snake_case__ : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm(self):
        """simple docstring"""
        pass
@slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 694 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change over [a, b] guarantees a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find the middle point
        c = (a + b) / 2
        # Check if the middle point is a root
        if equation(c) == 0.0:
            break
        # Decide which side to repeat the steps on
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
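# Editor's note: a quick sanity check of the solver above; the positive root of
# 10 - x**2 is sqrt(10) ≈ 3.1623, and the 0.01 bracket width bounds the error.
assert abs(bisection(0, 6) - 10**0.5) < 0.01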
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    """simple docstring"""

    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ : int = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ : Dict = prepare_mbart_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : Tuple = TFMBartModel(config=lowerCamelCase ).get_decoder()
snake_case__ : int = inputs_dict['''input_ids''']
snake_case__ : Union[str, Any] = input_ids[:1, :]
snake_case__ : Tuple = inputs_dict['''attention_mask'''][:1, :]
snake_case__ : Tuple = inputs_dict['''head_mask''']
snake_case__ : Any = 1
# first forward pass
snake_case__ : List[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , use_cache=lowerCamelCase )
snake_case__ ,snake_case__ : str = outputs.to_tuple()
snake_case__ : Optional[int] = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
]
    expected_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
    model_name = "facebook/mbart-large-en-ro"
@cached_property
    def tokenizer(self):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
    def model(self):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        """simple docstring"""
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)
    def translate_src_text(self, **tokenizer_kwargs):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
@slow
    def test_batch_generation_en_ro(self):
        """simple docstring"""
        self._assert_generated_batch_equal_expected()
| 694 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
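# Editor's note: tiny checks exercising both branches above.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0  # odd combined length
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # even combined length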
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 694 | 1 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting the number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
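# Editor's note: an equivalent vectorized form. The per-pixel loop above works
# because each img[i][j] is a NumPy array, so [255, 255, 255] - pixel broadcasts;
# the same transform can be done in one broadcast over the whole image:
def convert_to_negative_vectorized(img):
    return 255 - img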
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 694 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
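# Editor's note: the dispatch above relies on a small contract that every command
# implements. A sketch (hypothetical DemoCommand; the real commands live in
# transformers.commands):
#
#     class DemoCommand:
#         @staticmethod
#         def register_subcommand(commands_parser):
#             sub = commands_parser.add_parser("demo", help="print a greeting")
#             sub.set_defaults(func=lambda args: DemoCommand())
#
#         def run(self):
#             print("hello from transformers-cli demo")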
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        """simple docstring"""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
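# Editor's note: a minimal usage sketch (BERT chosen arbitrarily):
#
#     from transformers import BertConfig, EncoderDecoderConfig
#
#     encoder = BertConfig()
#     decoder = BertConfig(is_decoder=True, add_cross_attention=True)
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#     assert config.model_type == "encoder-decoder"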
| 694 | 1 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
snake_case__ : Tuple = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
snake_case__ : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case__ : List[Any] = {'''unk_token''': '''<unk>'''}
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase ) )
def lowercase__ ( self , **lowerCamelCase ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowercase__ ( self , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Tuple = '''lower newer'''
snake_case__ : Union[str, Any] = '''lower newer'''
return input_text, output_text
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : int = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Any = '''lower newer'''
snake_case__ : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
snake_case__ : List[str] = tokenizer.tokenize(lowerCamelCase , add_prefix_space=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
snake_case__ : Dict = tokens + [tokenizer.unk_token]
snake_case__ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Tuple = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase )
snake_case__ : Union[str, Any] = '''lower newer'''
# Testing tokenization
snake_case__ : Optional[Any] = tokenizer.tokenize(lowerCamelCase , add_prefix_space=lowerCamelCase )
snake_case__ : str = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Testing conversion to ids without special tokens
snake_case__ : List[str] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
snake_case__ : List[str] = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Testing conversion to ids with special tokens
snake_case__ : List[Any] = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase )
snake_case__ : Any = tokenizer.encode(lowerCamelCase , add_prefix_space=lowerCamelCase )
snake_case__ : Any = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Testing the unknown token
snake_case__ : List[Any] = tokens + [rust_tokenizer.unk_token]
snake_case__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def lowercase__ ( self , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowercase__ ( self , lowerCamelCase=15 ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
# Simple input
snake_case__ : List[str] = '''This is a simple input'''
snake_case__ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case__ : Optional[Any] = ('''This is a simple input''', '''This is a pair''')
snake_case__ : int = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' , )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
snake_case__ : List[str] = '''This is a simple input'''
snake_case__ : Union[str, Any] = ['''This is a simple input looooooooong''', '''This is a simple input''']
snake_case__ : str = ('''This is a simple input''', '''This is a pair''')
snake_case__ : Union[str, Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
snake_case__ : Any = tokenizer.pad_token_id
snake_case__ : Dict = tokenizer(lowerCamelCase , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
snake_case__ : List[str] = tokenizer(lowerCamelCase , padding=lowerCamelCase , truncate=lowerCamelCase , return_tensors='''np''' )
snake_case__ : int = tokenizer(*lowerCamelCase , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
snake_case__ : Optional[Any] = tokenizer(lowerCamelCase , padding=lowerCamelCase , truncate=lowerCamelCase , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : str = '''$$$'''
snake_case__ : int = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCamelCase , add_bos_token=lowerCamelCase )
snake_case__ : Optional[int] = '''This is a simple input'''
snake_case__ : int = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case__ : Dict = tokenizer.bos_token_id
snake_case__ : Tuple = tokenizer(lowerCamelCase )
snake_case__ : str = tokenizer(lowerCamelCase )
self.assertEqual(out_s.input_ids[0] , lowerCamelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case__ : Tuple = tokenizer.decode(out_s.input_ids )
snake_case__ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCamelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
snake_case__ : Dict = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
snake_case__ : str = '''\nif len_a > len_b: result = a\nelse: result = b'''
snake_case__ : Optional[int] = tokenizer.encode(lowerCamelCase )
snake_case__ : List[str] = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
snake_case__ : Any = tokenizer.decode(lowerCamelCase , truncate_before_pattern=lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
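# Editor's note: the truncate_before_pattern argument exercised in the slow test
# above cuts decoded text at the first regex match. Roughly (checkpoint download
# required, output shown under that assumption):
#
#     tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tokenizer.encode("if x:\n    y = 1\n\n\n\n# trailing noise")
#     tokenizer.decode(ids, truncate_before_pattern=["\n\n\n"])  # -> "if x:\n    y = 1"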
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
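# Editor's note: typical usage of the tokenizer above, with the checkpoint named
# in PRETRAINED_VOCAB_FILES_MAP (network access assumed):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]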
| 694 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling="mean", num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, is_encoder_decoder=True, activation_function="gelu", d_model=64, dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, **kwargs) -> None:
        """simple docstring"""
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        """simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
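# Editor's note: a minimal usage sketch; with the defaults above, context_length
# falls back to prediction_length and lags_sequence to [1, 2, 3, 4, 5, 6, 7]:
#
#     from transformers import TimeSeriesTransformerConfig
#
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#     assert config.context_length == 24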
| 694 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 1 |
'''simple docstring'''
_lowerCAmelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _A ( ):
snake_case__ : Dict = input('''Enter message: ''' )
snake_case__ : Union[str, Any] = input('''Enter key [alphanumeric]: ''' )
snake_case__ : List[Any] = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
snake_case__ : List[Any] = '''encrypt'''
snake_case__ : List[str] = encrypt_message(snake_case__ , snake_case__ )
elif mode.lower().startswith('''d''' ):
snake_case__ : List[Any] = '''decrypt'''
snake_case__ : str = decrypt_message(snake_case__ , snake_case__ )
print(f'''\n{mode.title()}ed message:''' )
print(snake_case__ )
def _A ( snake_case__ : str , snake_case__ : str ):
return translate_message(snake_case__ , snake_case__ , '''encrypt''' )
def _A ( snake_case__ : str , snake_case__ : str ):
return translate_message(snake_case__ , snake_case__ , '''decrypt''' )
def _A ( snake_case__ : str , snake_case__ : str , snake_case__ : str ):
snake_case__ : Optional[int] = []
snake_case__ : Union[str, Any] = 0
snake_case__ : str = key.upper()
for symbol in message:
snake_case__ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case__ ):
snake_case__ : List[str] = 0
else:
translated.append(snake_case__ )
return "".join(snake_case__ )
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
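# Editor's note: a matching toy server for local testing (hypothetical, not part
# of the original script): it binds the same port, consumes the greeting, then
# streams a file back to the client.
def demo_server(file_name: str = "file_to_send") -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume "Hello server!"
    with open(file_name, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()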
if __name__ == "__main__":
main()
| 694 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
_lowerCAmelCase : Any = 4
_lowerCAmelCase : str = 3
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
pass
def _A ( snake_case__ : List[str] ):
for shard in shards:
for i in range(snake_case__ ):
yield {"i": i, "shard": shard}
def _A ( ):
snake_case__ : Dict = int(os.environ['''RANK'''] )
snake_case__ : str = int(os.environ['''WORLD_SIZE'''] )
snake_case__ : List[Any] = ArgumentParser()
parser.add_argument('''--streaming''' , type=snake_case__ )
parser.add_argument('''--local_rank''' , type=snake_case__ )
parser.add_argument('''--num_workers''' , type=snake_case__ , default=0 )
snake_case__ : List[str] = parser.parse_args()
snake_case__ : Any = args.streaming
snake_case__ : Any = args.num_workers
snake_case__ : Optional[int] = {'''shards''': [f'''shard_{shard_idx}''' for shard_idx in range(snake_case__ )]}
snake_case__ : Union[str, Any] = IterableDataset.from_generator(snake_case__ , gen_kwargs=snake_case__ )
if not streaming:
snake_case__ : Any = Dataset.from_list(list(snake_case__ ) )
snake_case__ : List[Any] = split_dataset_by_node(snake_case__ , rank=snake_case__ , world_size=snake_case__ )
snake_case__ : str = torch.utils.data.DataLoader(snake_case__ , num_workers=snake_case__ )
snake_case__ : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
snake_case__ : Tuple = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
snake_case__ : Dict = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
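# The test above is meant to be launched once per process, e.g.
#   torchrun --nproc_per_node=2 this_script.py --streaming True
# (torchrun exports the RANK and WORLD_SIZE environment variables read in
# main()). split_dataset_by_node itself can be sketched standalone; the sizes
# below are illustrative, and per the expected-size check above any remainder
# rows go to the lowest ranks:
def _demo_split_dataset_by_node() -> None:
    ds = Dataset.from_list([{"i": i} for i in range(12)])
    sizes = [len(split_dataset_by_node(ds, rank=r, world_size=3)) for r in range(3)]
    assert sizes == [4, 4, 4]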
| 694 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
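# Worked numeric checks of the three formulas above, written inline so they do
# not depend on the name-mangled helpers (compound_interest is the only helper
# name the file itself still references, inside the APR function):
principal = 1_000.0
assert abs(principal * 0.01 * 10 - 100.0) < 1e-9                  # simple: P * rate * days
assert abs(principal * ((1 + 0.05) ** 3 - 1) - 157.625) < 1e-9    # compound over 3 periods
daily_rate = 0.05 / 365                                           # 5% APR compounded daily
assert abs(principal * ((1 + daily_rate) ** 365 - 1) - 51.2675) < 1e-3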
| 694 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_lowerCAmelCase : int = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=16 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=14 , lowerCamelCase=10 , lowerCamelCase=19 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=True , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=[1, 2, 3, 4, 5] , lowerCamelCase=25 , lowerCamelCase=5 , ) -> Tuple:
"""simple docstring"""
snake_case__ : int = d_model
snake_case__ : List[Any] = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Dict = prediction_length
snake_case__ : Optional[int] = context_length
snake_case__ : Tuple = cardinality
snake_case__ : str = num_time_features
snake_case__ : Optional[Any] = lags_sequence
snake_case__ : Optional[Any] = embedding_dimension
snake_case__ : Dict = is_training
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : str = num_hidden_layers
snake_case__ : Dict = num_attention_heads
snake_case__ : List[str] = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : Union[str, Any] = context_length
snake_case__ : List[Any] = prediction_length + label_length
snake_case__ : Optional[Any] = label_length
snake_case__ : str = moving_average
snake_case__ : Dict = autocorrelation_factor
def lowercase__ ( self ) -> str:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowercase__ ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = config.context_length + max(config.lags_sequence )
snake_case__ : int = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
snake_case__ : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
snake_case__ : Dict = floats_tensor([self.batch_size, _past_length] )
snake_case__ : int = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
snake_case__ : str = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] )
snake_case__ : Any = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Dict = self.get_config()
snake_case__ : Dict = self.prepare_autoformer_inputs_dict(lowerCamelCase )
return config, inputs_dict
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ ,snake_case__ : int = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Optional[int] = AutoformerModel(config=lowerCamelCase ).to(lowerCamelCase ).eval()
snake_case__ : Tuple = model(**lowerCamelCase )
snake_case__ : Optional[int] = outputs.encoder_last_hidden_state
snake_case__ : int = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : int = model.get_encoder()
encoder.save_pretrained(lowerCamelCase )
snake_case__ : List[str] = AutoformerEncoder.from_pretrained(lowerCamelCase ).to(lowerCamelCase )
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : Dict = model.create_network_inputs(**lowerCamelCase )
snake_case__ ,snake_case__ : List[Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
snake_case__ : int = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
snake_case__ : Tuple = encoder(inputs_embeds=lowerCamelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
snake_case__ : Optional[int] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
snake_case__ : List[Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
snake_case__ : List[str] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
snake_case__ : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : str = model.get_decoder()
decoder.save_pretrained(lowerCamelCase )
snake_case__ : Optional[int] = AutoformerDecoder.from_pretrained(lowerCamelCase ).to(lowerCamelCase )
snake_case__ : int = decoder(
trend=lowerCamelCase , inputs_embeds=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_lowerCAmelCase = (AutoformerForPrediction,) if is_torch_available() else ()
_lowerCAmelCase = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = AutoformerModelTester(self )
snake_case__ : int = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ ,snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
snake_case__ ,snake_case__ : Tuple = model_class.from_pretrained(lowerCamelCase , output_loading_info=lowerCamelCase )
self.assertEqual(info['''missing_keys'''] , [] )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> Any:
"""simple docstring"""
        snake_case__ : int = inspect.signature(getattr(AutoformerModel , '''forward''' ) )
# The main input is the name of the argument after `self`
snake_case__ : Optional[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(lowerCamelCase )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : int = [*signature.parameters.keys()]
snake_case__ : Optional[Any] = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(lowerCamelCase )] , lowerCamelCase )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = True
snake_case__ : Optional[Any] = getattr(self.model_tester , '''seq_length''' , lowerCamelCase )
snake_case__ : Tuple = getattr(self.model_tester , '''decoder_seq_length''' , lowerCamelCase )
snake_case__ : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , lowerCamelCase )
snake_case__ : str = getattr(self.model_tester , '''d_model''' , lowerCamelCase )
snake_case__ : Optional[int] = getattr(self.model_tester , '''num_attention_heads''' , lowerCamelCase )
snake_case__ : str = d_model // num_attention_heads
for model_class in self.all_model_classes:
snake_case__ : List[str] = True
snake_case__ : int = False
snake_case__ : Optional[Any] = True
snake_case__ : Tuple = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ : Any = True
snake_case__ : str = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Optional[int] = outputs.encoder_attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
snake_case__ : Union[str, Any] = len(lowerCamelCase )
snake_case__ : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCamelCase , lowerCamelCase )
# decoder attentions
snake_case__ : Tuple = outputs.decoder_attentions
self.assertIsInstance(lowerCamelCase , (list, tuple) )
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
snake_case__ : Dict = outputs.cross_attentions
self.assertIsInstance(lowerCamelCase , (list, tuple) )
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
snake_case__ : int = True
snake_case__ : str = True
snake_case__ : Optional[Any] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(out_len + 2 , len(lowerCamelCase ) )
snake_case__ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase__ ( self ) -> int:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def _A ( snake_case__ : str="train-batch.pt" ):
snake_case__ : Any = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=snake_case__ , repo_type='''dataset''' )
snake_case__ : Tuple = torch.load(snake_case__ , map_location=snake_case__ )
return batch
@require_torch
@slow
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Tuple = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(lowerCamelCase )
snake_case__ : List[Any] = prepare_batch()
with torch.no_grad():
snake_case__ : int = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
snake_case__ : Optional[int] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowerCamelCase )
snake_case__ : Optional[int] = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=lowerCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Dict = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(lowerCamelCase )
snake_case__ : List[str] = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
snake_case__ : Tuple = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
snake_case__ : Any = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowerCamelCase )
snake_case__ : Dict = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=lowerCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Any = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(lowerCamelCase )
snake_case__ : Dict = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
snake_case__ : Optional[Any] = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
snake_case__ : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowerCamelCase )
snake_case__ : str = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=lowerCamelCase )
snake_case__ : List[str] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCamelCase , rtol=1E-1 ) )
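# Outside the assertions, the same checkpoint can produce point forecasts; a
# minimal sketch reusing the prepare_batch fixture above (the helper name is
# mine; batch keys and the mean-over-samples reduction mirror the generate
# test):
def _point_forecast_sketch():
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            static_categorical_features=batch["static_categorical_features"],
            past_time_features=batch["past_time_features"],
            past_values=batch["past_values"],
            future_time_features=batch["future_time_features"],
            past_observed_mask=batch["past_observed_mask"],
        )
    # one point forecast per series: average over the parallel sample paths
    return outputs.sequences.mean(dim=1)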
| 694 |
'''simple docstring'''
from math import isqrt
def _A ( snake_case__ : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def _A ( snake_case__ : int = 10**6 ):
snake_case__ : str = 0
snake_case__ : List[str] = 1
snake_case__ : str = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
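# Why the loop visits exactly the numbers it does: the candidates are the gaps
# between consecutive cubes, (n + 1) ** 3 - n ** 3 == 3*n*n + 3*n + 1, and each
# gap exceeds the previous one by 6 * cube_index -- precisely the increment
# applied in solution() above. A quick structural check:
_gaps = [(n + 1) ** 3 - n ** 3 for n in range(1, 6)]
assert _gaps == [7, 19, 37, 61, 91]
assert all(b - a == 6 * i for i, (a, b) in enumerate(zip(_gaps, _gaps[1:]), start=2))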
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Tuple = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
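# Downstream code never imports from the submodules directly; the lazy table
# above is what makes the usual top-level imports resolve on first access,
# e.g. (a hedged sketch -- randomly initialised weights, no checkpoint
# download):
#   from transformers import FocalNetConfig, FocalNetModel
#   model = FocalNetModel(FocalNetConfig())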
| 694 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
        snake_case__ : Union[str, Any] = f1_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase )
return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
| 694 | 1 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _A ( snake_case__ : Tuple ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
super().__init__()
snake_case__ : int = module
snake_case__ : Dict = nn.Sequential(
nn.Linear(module.in_features , lowerCamelCase , bias=lowerCamelCase ) , nn.Linear(lowerCamelCase , module.out_features , bias=lowerCamelCase ) , )
snake_case__ : Any = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowercase__ ( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
return self.module(lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) + self.adapter(lowerCamelCase )
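# The adapter above is the standard LoRA construction: the wrapped (frozen)
# linear runs unchanged, and a low-rank bottleneck -- down-projection then
# up-projection, with the up-projection zero-initialised -- is added to its
# output, so training starts exactly at the base model's behaviour. A toy
# wrap, mirroring the LoRALayer(module.q_proj, rank=16) calls in the training
# test below (shapes illustrative):
#   base = nn.Linear(32, 32)
#   lora = LoRALayer(base, rank=8)
#   y = lora(torch.randn(4, 32))   # == base(x) + adapter(x); adapter(x) == 0 at init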
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = 'bigscience/bloom-1b7'
# Constant values
_lowerCAmelCase = 2.1_09_65_95_52_69_25_74
_lowerCAmelCase = 'Hello my name is'
_lowerCAmelCase = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
_lowerCAmelCase = 1_0
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(self.model_name )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
super().setUp()
# Models and tokenizer
snake_case__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
snake_case__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map='''auto''' )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase , '''quantization_config''' ) )
snake_case__ : Dict = config.to_dict()
snake_case__ : Dict = config.to_diff_dict()
snake_case__ : str = config.to_json_string()
def lowercase__ ( self ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
snake_case__ : Dict = self.model_fpaa.get_memory_footprint()
snake_case__ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
snake_case__ : List[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
snake_case__ : List[str] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Dict = BitsAndBytesConfig()
snake_case__ : Dict = True
snake_case__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase , device_map='''auto''' )
snake_case__ : Optional[int] = self.tokenizer(self.input_text , return_tensors='''pt''' )
snake_case__ : Union[str, Any] = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
with self.assertRaises(lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(lowerCamelCase ):
snake_case__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase , load_in_abit=lowerCamelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def lowercase__ ( self ) -> str:
"""simple docstring"""
with self.assertRaises(lowerCamelCase ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(lowerCamelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCamelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(lowerCamelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCamelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
snake_case__ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
snake_case__ : str = self.model_fpaa.to(torch.floataa )
snake_case__ : Dict = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
snake_case__ : Optional[int] = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
snake_case__ : Optional[int] = self.model_fpaa.half()
# Check this does not throw an error
snake_case__ : Dict = self.model_fpaa.float()
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : int = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=lowerCamelCase , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowercase__ ( cls ) -> str:
"""simple docstring"""
snake_case__ : List[str] = '''t5-small'''
snake_case__ : Dict = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(cls.model_name )
snake_case__ : int = '''Translate in German: Hello, my dog is cute'''
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
from transformers import TaForConditionalGeneration
snake_case__ : Dict = TaForConditionalGeneration._keep_in_fpaa_modules
snake_case__ : List[Any] = None
# test with `t5-small`
snake_case__ : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map='''auto''' )
snake_case__ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
snake_case__ : Union[str, Any] = model.generate(**lowerCamelCase )
# test with `flan-t5-small`
snake_case__ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase , device_map='''auto''' )
snake_case__ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
snake_case__ : List[str] = model.generate(**lowerCamelCase )
snake_case__ : Optional[Any] = modules
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
snake_case__ : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
snake_case__ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
snake_case__ : Optional[Any] = model.generate(**lowerCamelCase )
# test with `flan-t5-small`
snake_case__ : str = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase , device_map='''auto''' )
snake_case__ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
snake_case__ : Tuple = model.generate(**lowerCamelCase )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
super().setUp()
# model_name
snake_case__ : List[Any] = '''bigscience/bloom-560m'''
snake_case__ : Any = '''t5-small'''
# Different types of model
snake_case__ : Optional[int] = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map='''auto''' )
# Sequence classification model
snake_case__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase , device_map='''auto''' )
# CausalLM model
snake_case__ : Dict = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map='''auto''' )
# Seq2seq model
snake_case__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCamelCase , device_map='''auto''' )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
super().setUp()
def lowercase__ ( self ) -> str:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : str = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
snake_case__ : int = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
super().setUp()
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
snake_case__ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
snake_case__ : Union[str, Any] = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase ) , self.EXPECTED_OUTPUTS )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[int] = '''facebook/opt-350m'''
super().setUp()
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
snake_case__ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
snake_case__ : Union[str, Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
snake_case__ : Dict = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase ) ):
snake_case__ : Optional[Any] = LoRALayer(module.q_proj , rank=16 )
snake_case__ : int = LoRALayer(module.k_proj , rank=16 )
snake_case__ : Tuple = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
snake_case__ : Optional[int] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
snake_case__ : List[str] = model.forward(**lowerCamelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCamelCase , lowerCamelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'gpt2-xl'
_lowerCAmelCase = 3.31_91_85_48_54_15_21_87
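# Outside the test harness, the 4-bit path exercised above reduces to a
# BitsAndBytesConfig passed at load time; a minimal sketch (the nf4 quant type
# mirrors the kwarg tested earlier, the compute dtype and helper name are
# illustrative; requires a CUDA GPU with bitsandbytes installed):
def _load_bloom_in_4bit():
    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    return AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-560m", quantization_config=quant_config, device_map="auto"
    )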
| 694 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 42
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
snake_case__ : str = timestep
if not torch.is_tensor(lowerCamelCase ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[Any] = timesteps[None].to(sample.device )
snake_case__ : Any = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
snake_case__ : Tuple = self.time_mlp(lowerCamelCase )
else:
snake_case__ : Union[str, Any] = timestep_embed[..., None]
snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case__ : List[Any] = ()
for downsample_block in self.down_blocks:
snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case__ : str = down_block_res_samples[-1:]
snake_case__ : int = down_block_res_samples[:-1]
snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase )
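# Shape contract of the forward pass above, in brief: `sample` is
# (batch, in_channels, length), `timestep` is a scalar or a (batch,) tensor,
# and the returned `.sample` swaps in out_channels. A hedged usage sketch
# against the public diffusers counterpart of this class (UNet1DModel), with
# the fourier/extra-channel settings the non-embedding path above expects:
#   from diffusers import UNet1DModel
#   model = UNet1DModel(in_channels=2, out_channels=2, extra_in_channels=16)
#   out = model(torch.randn(1, 2, 256), timestep=10).sample   # (1, 2, 256)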
| 694 | 1 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (EulerDiscreteScheduler,)
_lowerCAmelCase = 1_0
def lowercase__ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase )
return config
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Any = self.get_scheduler_config()
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Any = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : int = model(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : Optional[int] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Union[str, Any] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Tuple = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : int = output.prev_sample
snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Dict = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Optional[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
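# Outside the tests, the scheduler is driven with the same three calls
# exercised above: set_timesteps, scale_model_input, step. A minimal denoising
# loop sketch (the zeros tensor is a stand-in for a real UNet prediction):
def _euler_denoise_sketch() -> torch.Tensor:
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for unet(model_input, t)
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
    return sample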
| 694 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
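
# Hypothetical invocation sketch (paths and script name are placeholders; the flag
# names come from the argparse definition above):
#
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch
#
# Afterwards the converted weights load like any BERT checkpoint, paired with the
# RoBERTa vocabulary as in the comparison step above:
#
#   from transformers import BertModel, RobertaTokenizer
#
#   tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#   model = BertModel.from_pretrained("./bort-pytorch")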
| 694 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """simple docstring"""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
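
# Usage sketch (BERT sub-configs are just an illustrative choice; requires transformers):
#
#   from transformers import BertConfig
#
#   encoder = BertConfig(hidden_size=256)
#   decoder = BertConfig(hidden_size=256)
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention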
| 694 |
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
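
# Optional alternative, not part of the original solution: even Fibonacci numbers
# satisfy E(k) = 4 * E(k-1) + E(k-2) with E(1) = 2, E(2) = 8, so the even terms can
# be generated directly without testing parity.
def solution_even_recurrence(n: int = 4_000_000) -> int:
    total, prev, curr = 0, 2, 8
    if n >= 2:
        total += 2
    while curr <= n:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total


assert solution_even_recurrence() == solution()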
| 694 | 1 |
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
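
# Worked example (illustrative numbers): an initial outlay of -100 followed by two
# inflows of 60, discounted at 10% per period; the function rounds to two decimals.
#
#   present_value(0.10, [-100.0, 60.0, 60.0])
#   = -100 + 60 / 1.1 + 60 / 1.1**2 ≈ -100 + 54.55 + 49.59 = 4.13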
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        """simple docstring"""
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """simple docstring"""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
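
# Usage sketch (downloads the google/pegasus-xsum tokenizer, so network access is
# assumed): every encoded sequence ends with </s>, matching
# build_inputs_with_special_tokens above.
#
#   from transformers import PegasusTokenizerFast
#
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tok("Hello world")["input_ids"]
#   assert ids[-1] == tok.eos_token_id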
| 694 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        """simple docstring"""
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_image(self):
        """simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
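
# A minimal sketch of the color-quantization step ImageGPT's processor performs: each
# (normalized) pixel is mapped to the index of its nearest cluster center. The cluster
# values below are toy numbers, not the real ImageGPT palette.
def pixels_to_tokens(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) in [-1, 1]; clusters: (k, 3); returns (n,) cluster indices
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)


toy_clusters = np.array([[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]])
assert pixels_to_tokens(np.array([[0.4, 0.6, 0.5]]), toy_clusters).tolist() == [1]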
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """simple docstring"""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        """simple docstring"""
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """simple docstring"""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """simple docstring"""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """simple docstring"""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    """simple docstring"""

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        """simple docstring"""
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    """simple docstring"""

    def __init__(self, function):
        """simple docstring"""
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        """simple docstring"""
        return self.function(x, *args)


class DistributionOutput:
    """simple docstring"""

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        """simple docstring"""
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        """simple docstring"""
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        """simple docstring"""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """simple docstring"""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """simple docstring"""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """simple docstring"""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """simple docstring"""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """simple docstring"""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """simple docstring"""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    """simple docstring"""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        """simple docstring"""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    """simple docstring"""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        """simple docstring"""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    """simple docstring"""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        """simple docstring"""
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        """simple docstring"""
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """simple docstring"""
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
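
# Usage sketch for the heads above (shapes are illustrative assumptions): project
# features to distribution parameters, build the distribution, and score targets.
#
#   head = StudentTOutput(dim=1)
#   projection = head.get_parameter_projection(in_features=32)
#   distr_args = projection(torch.randn(8, 32))  # (df, loc, scale), each of shape (8,)
#   distr = head.distribution(distr_args)
#   nll = -distr.log_prob(torch.randn(8))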
| 694 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """simple docstring"""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        """simple docstring"""
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        """simple docstring"""
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        """simple docstring"""
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        """simple docstring"""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        """simple docstring"""
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        """simple docstring"""
        pass
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        """simple docstring"""
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        """simple docstring"""
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        """simple docstring"""
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        """simple docstring"""
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        """simple docstring"""
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        """simple docstring"""
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        pixel_values = inputs["pixel_values"].to(torch_device)
        mask_labels = [el.to(torch_device) for el in inputs["mask_labels"]]
        class_labels = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        self.assertTrue(outputs.loss is not None)
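
# Post-processing sketch (semantic segmentation): combine per-query class scores with
# per-query mask logits, mirroring MaskFormer's documented inference rule. Tensor
# shapes are illustrative.
def semantic_map(class_queries_logits, masks_queries_logits):
    # drop the trailing "no object" class, softmax over classes, sigmoid over masks
    probs = class_queries_logits.softmax(dim=-1)[..., :-1]       # (B, Q, C)
    masks = masks_queries_logits.sigmoid()                       # (B, Q, H, W)
    segmentation = torch.einsum("bqc,bqhw->bchw", probs, masks)  # (B, C, H, W)
    return segmentation.argmax(dim=1)                            # (B, H, W)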
| 694 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
_lowerCAmelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
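
# Usage sketch: the xpath-specific sizes are what distinguish this config from a plain
# BERT config; the values checked here are the defaults defined above.
#
#   config = MarkupLMConfig()
#   assert (config.max_depth, config.xpath_unit_hidden_size) == (50, 32)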
| 694 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
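
# One Euler step in isolation (epsilon-prediction case), as a hedged sketch of what
# scheduler.step does in the loops above: turn the noise prediction into a denoised
# estimate, form the ODE derivative, and move from sigma to sigma_next.
def euler_step(sample, model_output, sigma, sigma_next):
    pred_original = sample - sigma * model_output  # estimate of the clean sample
    derivative = (sample - pred_original) / sigma  # equals model_output in this case
    return sample + derivative * (sigma_next - sigma)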
| 694 | 1 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
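    # Overlapping matches are reported too, one index per alignment; worst-case cost is
    # O(len(s) * len(pattern)) character comparisons.
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]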
| 694 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        """simple docstring"""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        """simple docstring"""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 694 | 1 |
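# The core of the image-processor row above is rounding height/width down to
# the nearest multiple of `size_divisor` before resizing. A standalone sketch
# of that computation (the helper name is my own):
def round_down_to_multiple(height: int, width: int, size_divisor: int) -> tuple[int, int]:
    # Integer division floors, so h // d * d is the largest multiple of d <= h.
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert round_down_to_multiple(1080, 1920, 32) == (1056, 1920)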
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : str = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
| 694 | 1 |
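# A sketch of the URL construction the test above asserts, assuming the same
# quoting rules as its f-string (the helper name is my own, not the library's):
from typing import Optional
from urllib.parse import quote

def hub_url_sketch(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    return f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"

assert hub_url_sketch("org/ds", "file name.csv") == (
    "https://huggingface.co/datasets/org/ds/resolve/main/file%20name.csv"
)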
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_lowerCAmelCase : Optional[Any] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 2048-bit
1_4: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 3072-bit
1_5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 4096-bit
1_6: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 6144-bit
1_7: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 8192-bit
1_8: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
}
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase = 14 ) -> None:
"""simple docstring"""
if group not in primes:
raise ValueError('''Unsupported Group''' )
snake_case__ : int = primes[group]['''prime''']
snake_case__ : List[Any] = primes[group]['''generator''']
snake_case__ : Union[str, Any] = int(hexlify(urandom(32 ) ) , base=16 )
def lowercase__ ( self ) -> str:
"""simple docstring"""
return hex(self.__private_key )[2:]
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Tuple = pow(self.generator , self.__private_key , self.prime )
return hex(lowerCamelCase )[2:]
def lowercase__ ( self , lowerCamelCase ) -> bool:
"""simple docstring"""
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase , (self.prime - 1) // 2 , self.prime ) == 1
)
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : str = int(lowerCamelCase , base=16 )
if not self.is_valid_public_key(lowerCamelCase ):
raise ValueError('''Invalid public key''' )
snake_case__ : List[Any] = pow(lowerCamelCase , self.__private_key , self.prime )
return shaaaa(str(lowerCamelCase ).encode() ).hexdigest()
@staticmethod
def lowercase__ ( lowerCamelCase , lowerCamelCase ) -> bool:
"""simple docstring"""
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase , (prime - 1) // 2 , lowerCamelCase ) == 1
)
@staticmethod
def lowercase__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 14 ) -> str:
"""simple docstring"""
snake_case__ : str = int(lowerCamelCase , base=16 )
snake_case__ : Dict = int(lowerCamelCase , base=16 )
snake_case__ : Tuple = primes[group]['''prime''']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase , lowerCamelCase ):
raise ValueError('''Invalid public key''' )
snake_case__ : str = pow(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return shaaaa(str(lowerCamelCase ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
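# A toy end-to-end Diffie-Hellman exchange illustrating the math behind the
# class above. The tiny parameters (p=23, g=5) are for demonstration only and
# are nothing like the RFC 3526 groups used in the real code.
def dh_toy_exchange() -> int:
    prime, generator = 23, 5
    alice_private, bob_private = 6, 15
    alice_public = pow(generator, alice_private, prime)
    bob_public = pow(generator, bob_private, prime)
    # Each party raises the other's public value to its own private exponent;
    # both arrive at g**(a*b) mod p.
    shared_a = pow(bob_public, alice_private, prime)
    shared_b = pow(alice_public, bob_private, prime)
    assert shared_a == shared_b
    return shared_a

dh_toy_exchange()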
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
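# The function above solves P = V * I for whichever quantity is passed as 0.
# A condensed sketch of the same dispatch (simplified: no rounding, and the
# name is my own):
def electric_power_sketch(voltage: float, current: float, power: float) -> float:
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Exactly one argument must be 0")
    if voltage == 0:
        return power / current
    if current == 0:
        return power / voltage
    return voltage * current

assert electric_power_sketch(0, 2, 4) == 2.0
assert electric_power_sketch(3, 2, 0) == 6.0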
'''simple docstring'''
import math
def _A ( snake_case__ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k ± 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _A ( snake_case__ : float = 0.1 ):
snake_case__ : Dict = 3
snake_case__ : int = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(snake_case__ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
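# The inner loop above walks the three non-square corners of each layer of a
# number spiral (the prime-ratio-of-diagonals problem). For a layer of odd
# side length n, the four diagonal corners can also be written directly; a
# sketch of that equivalent formulation:
def spiral_corners(n: int) -> list[int]:
    """Diagonal values on the layer of side length n (odd, n >= 3)."""
    # The bottom-right corner is n*n; each earlier corner trails it by n - 1.
    return [n * n - k * (n - 1) for k in range(4)]

assert spiral_corners(3) == [9, 7, 5, 3]
assert spiral_corners(5) == [25, 21, 17, 13]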
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , snake_case__ )
def _A ( snake_case__ : List[str] , snake_case__ : Dict ):
snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' )
with open(snake_case__ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case__ )
snake_case__ : int = get_imports(snake_case__ )
assert parsed_imports == ["os"]
| 694 | 1 |
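# The test above expects only unconditional top-level imports to survive
# (imports inside functions or try/except blocks are dropped). A simplified
# AST-based sketch of that filtering; the real `get_imports` may differ in
# detail:
import ast

def top_level_imports_sketch(source: str) -> list[str]:
    modules = []
    for node in ast.parse(source).body:  # module body only: skips nested and guarded code
        if isinstance(node, ast.Import):
            modules.extend(alias.name.split(".")[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module and node.level == 0:
            modules.append(node.module.split(".")[0])
    return sorted(set(modules))

assert top_level_imports_sketch("import os\n\ntry:\n    import bar\nexcept ImportError:\n    pass\n") == ["os"]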
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 694 |
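# Assuming the `transformers` package and network access, the tokenizer above
# is normally loaded through the auto class rather than constructed by hand;
# an illustrative usage (downloads the vocab files on first use):
from transformers import AutoTokenizer

bartpho = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
print(bartpho.tokenize("Chúng tôi là những nghiên cứu viên."))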
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'markuplm'
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Optional[int] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : List[str] = initializer_range
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Optional[Any] = position_embedding_type
snake_case__ : Dict = use_cache
snake_case__ : int = classifier_dropout
# additional properties
snake_case__ : Union[str, Any] = max_depth
snake_case__ : Dict = max_xpath_tag_unit_embeddings
snake_case__ : Any = max_xpath_subs_unit_embeddings
snake_case__ : int = tag_pad_id
snake_case__ : Tuple = subs_pad_id
snake_case__ : Dict = xpath_unit_hidden_size
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : str , snake_case__ : str = " " ):
snake_case__ : List[Any] = []
snake_case__ : Union[str, Any] = 0
for index, char in enumerate(snake_case__ ):
if char == separator:
split_words.append(string[last_index:index] )
snake_case__ : List[str] = index + 1
elif index + 1 == len(snake_case__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 694 |
'''simple docstring'''
def _A ( snake_case__ : float ):
return 10 - x * x
def _A ( snake_case__ : float , snake_case__ : float ):
    # Bolzano's theorem: a sign change of the function on [a, b] guarantees a root in between
if equation(snake_case__ ) * equation(snake_case__ ) >= 0:
raise ValueError('''Wrong space!''' )
snake_case__ : List[str] = a
while (b - a) >= 0.01:
# Find middle point
snake_case__ : Optional[int] = (a + b) / 2
# Check if middle point is root
if equation(snake_case__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case__ ) * equation(snake_case__ ) < 0:
snake_case__ : Dict = c
else:
snake_case__ : List[str] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 694 | 1 |
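# The bisection above is hard-wired to f(x) = 10 - x*x. A generalized sketch
# that takes the function and tolerance as parameters (names are my own):
from typing import Callable

def bisect_sketch(f: Callable[[float], float], a: float, b: float, tol: float = 0.01) -> float:
    if f(a) * f(b) >= 0:
        raise ValueError("f(a) and f(b) must have opposite signs")
    while b - a >= tol:
        c = (a + b) / 2
        if f(c) == 0.0:
            return c
        if f(a) * f(c) < 0:
            b = c
        else:
            a = c
    return (a + b) / 2

root = bisect_sketch(lambda x: 10 - x * x, -2, 5)
assert abs(root * root - 10) < 0.1  # root approximates sqrt(10)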
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _A ( snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any]=True ):
model.train()
snake_case__ : Dict = model(snake_case__ )
snake_case__ : Dict = F.mse_loss(snake_case__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case__ )
def _A ( snake_case__ : str , snake_case__ : List[Any]=False ):
set_seed(42 )
snake_case__ : Dict = RegressionModel()
snake_case__ : Tuple = deepcopy(snake_case__ )
snake_case__ : Union[str, Any] = RegressionDataset(length=80 )
snake_case__ : Optional[Any] = DataLoader(snake_case__ , batch_size=16 )
model.to(accelerator.device )
if sched:
snake_case__ : Optional[int] = AdamW(params=model.parameters() , lr=1E-3 )
snake_case__ : List[str] = AdamW(params=ddp_model.parameters() , lr=1E-3 )
snake_case__ : Optional[Any] = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 )
snake_case__ : Dict = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 )
# Make a copy of `model`
if sched:
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : int = accelerator.prepare(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
snake_case__ ,snake_case__ : int = accelerator.prepare(snake_case__ , snake_case__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _A ( snake_case__ : Optional[int] ):
    # Test that on a single CPU or GPU the context manager does nothing
snake_case__ ,snake_case__ ,snake_case__ : List[str] = get_training_setup(snake_case__ )
# Use a single batch
snake_case__ ,snake_case__ : List[str] = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case__ ,snake_case__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
snake_case__ ,snake_case__ : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
snake_case__ : Dict = ddp_input[torch.randperm(len(snake_case__ ) )]
def _A ( snake_case__ : Union[str, Any] ):
    # Test that the context manager behaves properly in a distributed setup
snake_case__ ,snake_case__ ,snake_case__ : Any = get_training_setup(snake_case__ )
# Use a single batch
snake_case__ ,snake_case__ : List[str] = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case__ ,snake_case__ : List[str] = accelerator.gather((ddp_input, ddp_target) )
snake_case__ ,snake_case__ : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
snake_case__ : Optional[int] = ddp_input[torch.randperm(len(snake_case__ ) )]
def _A ( snake_case__ : Optional[Any]=False , snake_case__ : Dict=False ):
snake_case__ : Any = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case__ ,snake_case__ ,snake_case__ : Dict = get_training_setup(snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
snake_case__ ,snake_case__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case__ ,snake_case__ : Tuple = accelerator.gather((ddp_input, ddp_target) )
snake_case__ ,snake_case__ : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
snake_case__ : int = ddp_input[torch.randperm(len(snake_case__ ) )]
GradientState._reset_state()
def _A ( snake_case__ : Optional[int]=False , snake_case__ : Tuple=False ):
snake_case__ : Dict = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : str = get_training_setup(snake_case__ , snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
snake_case__ ,snake_case__ : Tuple = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case__ ,snake_case__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
snake_case__ ,snake_case__ : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
snake_case__ : Optional[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def _A ( ):
snake_case__ : Tuple = Accelerator()
snake_case__ : Dict = RegressionDataset(length=80 )
snake_case__ : Tuple = DataLoader(snake_case__ , batch_size=16 )
snake_case__ : Tuple = RegressionDataset(length=96 )
snake_case__ : int = DataLoader(snake_case__ , batch_size=16 )
snake_case__ ,snake_case__ : Dict = accelerator.prepare(snake_case__ , snake_case__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if iteration < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if batch_num < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _A ( ):
snake_case__ : List[Any] = Accelerator()
snake_case__ : Union[str, Any] = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(snake_case__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(snake_case__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(snake_case__ , snake_case__ )
    # Currently will break on torch 2.0+, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ )
def _A ( snake_case__ : Tuple ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 694 |
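# The tests above verify `Accelerator.accumulate`. A minimal training-loop
# sketch of the pattern they exercise, with a dummy model and dataset
# (requires the `accelerate` package):
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(32, 4), torch.randn(32, 1)), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Gradients are synchronized and applied only every
    # `gradient_accumulation_steps` batches; other steps accumulate locally.
    with accelerator.accumulate(model):
        loss = F.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()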
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : list[float] , snake_case__ : list[float] ):
snake_case__ : Dict = sorted(numsa + numsa )
snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 694 | 1 |
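# Quick cross-check of the merge-based median above against the standard
# library (the input lists need not be individually sorted):
import statistics

assert statistics.median([1.0, 3.0] + [2.0, 4.0, 6.0]) == 3.0  # odd total length
assert statistics.median([1.0, 2.0] + [3.0, 4.0]) == 2.5       # even total length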
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : str = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 1 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _A ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : int ):
snake_case__ : int = 0
if start < end:
snake_case__ : Union[str, Any] = randint(snake_case__ , snake_case__ )
snake_case__ : Optional[int] = a[end]
snake_case__ : List[str] = a[pivot]
snake_case__ : Tuple = temp
snake_case__ ,snake_case__ : Any = _in_place_partition(snake_case__ , snake_case__ , snake_case__ )
count += _in_place_quick_sort(snake_case__ , snake_case__ , p - 1 )
count += _in_place_quick_sort(snake_case__ , p + 1 , snake_case__ )
return count
def _A ( snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
snake_case__ : Tuple = 0
snake_case__ : int = randint(snake_case__ , snake_case__ )
snake_case__ : int = a[end]
snake_case__ : List[Any] = a[pivot]
snake_case__ : str = temp
snake_case__ : str = start - 1
for index in range(snake_case__ , snake_case__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case__ : Optional[Any] = new_pivot_index + 1
snake_case__ : Dict = a[new_pivot_index]
snake_case__ : Any = a[index]
snake_case__ : str = temp
snake_case__ : Optional[int] = a[new_pivot_index + 1]
snake_case__ : Union[str, Any] = a[end]
snake_case__ : Tuple = temp
return new_pivot_index + 1, count
_lowerCAmelCase : Tuple = TemporaryFile()
_lowerCAmelCase : int = 1_0_0 # 100 elements are to be sorted
_lowerCAmelCase , _lowerCAmelCase : List[Any] = 0, 1 # mean and standard deviation
_lowerCAmelCase : int = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
_lowerCAmelCase : Optional[int] = np.load(outfile)
_lowerCAmelCase : Optional[Any] = len(M) - 1
_lowerCAmelCase : Union[str, Any] = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 694 |
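# The row above is a random-pivot in-place quicksort that counts comparisons.
# A compact sketch of the same idea (Lomuto partition; names are my own):
import random

def quicksort_count_sketch(a: list, lo: int, hi: int) -> int:
    """Sort a[lo:hi+1] in place; return the number of comparisons made."""
    if lo >= hi:
        return 0
    pivot = random.randint(lo, hi)
    a[pivot], a[hi] = a[hi], a[pivot]  # move the pivot to the end
    store, count = lo, 0
    for i in range(lo, hi):
        count += 1
        if a[i] < a[hi]:
            a[i], a[store] = a[store], a[i]
            store += 1
    a[store], a[hi] = a[hi], a[store]
    return count + quicksort_count_sketch(a, lo, store - 1) + quicksort_count_sketch(a, store + 1, hi)

data = [5, 3, 8, 1, 9, 2]
quicksort_count_sketch(data, 0, len(data) - 1)
assert data == [1, 2, 3, 5, 8, 9]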
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'encoder-decoder'
_lowerCAmelCase = True
def __init__( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case__ : List[str] = kwargs.pop('''encoder''' )
snake_case__ : Any = encoder_config.pop('''model_type''' )
snake_case__ : List[str] = kwargs.pop('''decoder''' )
snake_case__ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : str = True
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case__ : Optional[int] = True
snake_case__ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case__ : List[Any] = self.encoder.to_dict()
snake_case__ : str = self.decoder.to_dict()
snake_case__ : Any = self.__class__.model_type
return output
| 694 | 1 |
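# Assuming the `transformers` package, the config above is usually built from
# two existing configs via the classmethod defined in the row; an illustrative
# sketch with two small BERT configs:
from transformers import BertConfig, EncoderDecoderConfig

encoder_cfg = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
decoder_cfg = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
assert config.decoder.is_decoder and config.decoder.add_cross_attention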
'''simple docstring'''
_lowerCAmelCase : List[str] = 2_5_6
# Modulus to hash a string
_lowerCAmelCase : Union[str, Any] = 1_0_0_0_0_0_3
def _A ( snake_case__ : str , snake_case__ : str ):
snake_case__ : List[str] = len(snake_case__ )
snake_case__ : Union[str, Any] = len(snake_case__ )
if p_len > t_len:
return False
snake_case__ : Optional[int] = 0
snake_case__ : str = 0
snake_case__ : Union[str, Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(snake_case__ ):
snake_case__ : int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case__ : int = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case__ : Optional[int] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
snake_case__ : List[Any] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _A ( ):
snake_case__ : Optional[int] = '''abc1abc12'''
snake_case__ : Dict = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
snake_case__ : List[str] = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(snake_case__ , snake_case__ ) and not rabin_karp(snake_case__ , snake_case__ )
# Test 2)
snake_case__ : List[Any] = '''ABABX'''
snake_case__ : Tuple = '''ABABZABABYABABX'''
assert rabin_karp(snake_case__ , snake_case__ )
# Test 3)
snake_case__ : str = '''AAAB'''
snake_case__ : str = '''ABAAAAAB'''
assert rabin_karp(snake_case__ , snake_case__ )
# Test 4)
snake_case__ : List[Any] = '''abcdabcy'''
snake_case__ : Optional[Any] = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(snake_case__ , snake_case__ )
# Test 5)
snake_case__ : Dict = '''Lü'''
snake_case__ : Tuple = '''Lüsai'''
assert rabin_karp(snake_case__ , snake_case__ )
snake_case__ : Tuple = '''Lue'''
assert not rabin_karp(snake_case__ , snake_case__ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 694 |
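# The essential step in the row above is the O(1) rolling-hash update as the
# window slides. A condensed sketch of the full search with the same base and
# modulus (names are my own):
def rabin_karp_sketch(pattern: str, text: str) -> bool:
    base, mod = 256, 1_000_003
    m, n = len(pattern), len(text)
    if m > n:
        return False
    p_hash = t_hash = 0
    for i in range(m):
        p_hash = (p_hash * base + ord(pattern[i])) % mod
        t_hash = (t_hash * base + ord(text[i])) % mod
    high = pow(base, m - 1, mod)  # weight of the outgoing character
    for i in range(n - m + 1):
        if t_hash == p_hash and text[i : i + m] == pattern:  # verify to rule out hash collisions
            return True
        if i < n - m:
            t_hash = ((t_hash - ord(text[i]) * high) * base + ord(text[i + m])) % mod
    return False

assert rabin_karp_sketch("ABC", "ZZABCZZ")
assert not rabin_karp_sketch("ABD", "ZZABCZZ")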
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 694 | 1 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCAmelCase : Optional[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCAmelCase : Dict = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCAmelCase : Tuple = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="auto" , lowerCamelCase=-1 , lowerCamelCase=0.9 , lowerCamelCase=5 , lowerCamelCase=500 , lowerCamelCase="gpt2-large" , lowerCamelCase=-1 , lowerCamelCase=1024 , lowerCamelCase=25 , lowerCamelCase=5 , lowerCamelCase=True , lowerCamelCase=25 , ) -> int:
"""simple docstring"""
snake_case__ : Dict = compute_mauve(
p_text=lowerCamelCase , q_text=lowerCamelCase , p_features=lowerCamelCase , q_features=lowerCamelCase , p_tokens=lowerCamelCase , q_tokens=lowerCamelCase , num_buckets=lowerCamelCase , pca_max_data=lowerCamelCase , kmeans_explained_var=lowerCamelCase , kmeans_num_redo=lowerCamelCase , kmeans_max_iter=lowerCamelCase , featurize_model_name=lowerCamelCase , device_id=lowerCamelCase , max_text_length=lowerCamelCase , divergence_curve_discretization_size=lowerCamelCase , mauve_scaling_factor=lowerCamelCase , verbose=lowerCamelCase , seed=lowerCamelCase , )
return out
| 694 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowerCAmelCase : Optional[int] = parser.parse_args()
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model"
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase : Optional[Any] = pipe.to(device)
# to channels last
_lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
_lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last)
_lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
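# dummy UNet inputs (latent sample, timestep, text-encoder hidden states) give IPEX shapes to trace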
_lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4)
_lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9
_lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8)
_lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowerCAmelCase : Tuple = 6_6_6
_lowerCAmelCase : str = torch.Generator(device).manual_seed(seed)
_lowerCAmelCase : Dict = {"generator": generator}
if args.steps is not None:
_lowerCAmelCase : Tuple = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_lowerCAmelCase : int = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_lowerCAmelCase : str = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_lowerCAmelCase : Dict = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_lowerCAmelCase : Dict = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_lowerCAmelCase : Tuple = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=[1, 10, 100] , lowerCamelCase=4 , lowerCamelCase=3.0 ) -> Any:
"""simple docstring"""
if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('''This metric is currently not supported on Windows.''' )
with ThreadPoolExecutor(max_workers=lowerCamelCase ) as executor:
snake_case__ : Union[str, Any] = []
snake_case__ : Optional[Any] = Counter()
snake_case__ : Optional[int] = 0
snake_case__ : int = defaultdict(lowerCamelCase )
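            # submit one sandboxed execution job per (candidate, test case) pair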
for task_id, (candidates, test_case) in enumerate(zip(lowerCamelCase , lowerCamelCase ) ):
for candidate in candidates:
snake_case__ : int = candidate + '''\n''' + test_case
snake_case__ : Tuple = (test_program, timeout, task_id, completion_id[task_id])
snake_case__ : int = executor.submit(lowerCamelCase , *lowerCamelCase )
futures.append(lowerCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowerCamelCase ):
snake_case__ : int = future.result()
results[result["task_id"]].append((result['''completion_id'''], result) )
snake_case__ ,snake_case__ : int = [], []
for result in results.values():
result.sort()
snake_case__ : Tuple = [r[1]['''passed'''] for r in result]
total.append(len(lowerCamelCase ) )
correct.append(sum(lowerCamelCase ) )
snake_case__ : str = np.array(lowerCamelCase )
snake_case__ : str = np.array(lowerCamelCase )
snake_case__ : List[str] = k
snake_case__ : Dict = {f'''pass@{k}''': estimate_pass_at_k(lowerCamelCase , lowerCamelCase , lowerCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _A ( snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] ):
def estimator(snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> float:
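        # unbiased pass@k estimator from the Codex paper: 1 - C(n - c, k) / C(n, k)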
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(snake_case__ , snake_case__ ):
snake_case__ : List[Any] = itertools.repeat(snake_case__ , len(snake_case__ ) )
else:
assert len(snake_case__ ) == len(snake_case__ )
snake_case__ : str = iter(snake_case__ )
return np.array([estimator(int(snake_case__ ) , int(snake_case__ ) , snake_case__ ) for n, c in zip(snake_case__ , snake_case__ )] )
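# sketch of the arithmetic, using the docstring example above: with n = 2 samples
# per task and c = 1 passing, pass@1 = 1 - C(1, 1) / C(2, 1) = 0.5, and pass@2 = 1.0
# because n - c < k, matching {'pass@1': 0.5, 'pass@2': 1.0}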
| 694 |
'''simple docstring'''
import socket
def _A ( ):
snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case__ : str = socket.gethostname()
snake_case__ : Union[str, Any] = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
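        # read the stream in 1024-byte chunks until the server closes the connection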
while True:
snake_case__ : int = sock.recv(10_24 )
if not data:
break
out_file.write(snake_case__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
| 694 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowerCAmelCase : Any = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class snake_case :
"""simple docstring"""
_lowerCAmelCase = PegasusConfig
_lowerCAmelCase = {}
_lowerCAmelCase = 'gelu'
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=20 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0 , ) -> Dict:
"""simple docstring"""
snake_case__ : Optional[Any] = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : str = seq_length
snake_case__ : Tuple = is_training
snake_case__ : Dict = use_labels
snake_case__ : Any = vocab_size
snake_case__ : List[Any] = hidden_size
snake_case__ : Dict = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : Any = intermediate_size
snake_case__ : Dict = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : int = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : str = pad_token_id
snake_case__ : Dict = bos_token_id
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
snake_case__ : Optional[int] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ : int = np.concatenate([input_ids, eos_tensor] , axis=1 )
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ : str = prepare_pegasus_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Union[str, Any] = 20
snake_case__ : Tuple = model_class_name(lowerCamelCase )
snake_case__ : str = model.encode(inputs_dict['''input_ids'''] )
snake_case__ ,snake_case__ : int = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
snake_case__ : Dict = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
snake_case__ : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
snake_case__ : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case__ : List[Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
snake_case__ : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
snake_case__ : Any = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase , )
snake_case__ : Union[str, Any] = model.decode(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[int] = 20
snake_case__ : Dict = model_class_name(lowerCamelCase )
snake_case__ : Dict = model.encode(inputs_dict['''input_ids'''] )
snake_case__ ,snake_case__ : Dict = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
snake_case__ : List[str] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
snake_case__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case__ : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
snake_case__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
snake_case__ : List[Any] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
snake_case__ : Union[str, Any] = model.decode(lowerCamelCase , lowerCamelCase , decoder_attention_mask=lowerCamelCase )
snake_case__ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def _A ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Dict=None , snake_case__ : Dict=None , ):
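    # derive padding-based attention masks when the caller does not supply them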
if attention_mask is None:
snake_case__ : List[str] = np.not_equal(snake_case__ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
snake_case__ : List[Any] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_lowerCAmelCase = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Any = FlaxPegasusModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=lowerCamelCase )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ ,snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ ,snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ : List[Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
snake_case__ : Any = model_class(lowerCamelCase )
@jax.jit
def encode_jitted(lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
return model.encode(input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
snake_case__ : Dict = encode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
snake_case__ : int = encode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ ,snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ : List[Any] = model_class(lowerCamelCase )
snake_case__ : Dict = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
snake_case__ : Any = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
return model.decode(
decoder_input_ids=lowerCamelCase , decoder_attention_mask=lowerCamelCase , encoder_outputs=lowerCamelCase , )
with self.subTest('''JIT Enabled''' ):
snake_case__ : Union[str, Any] = decode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
snake_case__ : Optional[Any] = decode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase__ ( self ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case__ : List[str] = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=lowerCamelCase )
snake_case__ : List[Any] = np.ones((1, 1) )
snake_case__ : str = model(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@slow
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Any = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
snake_case__ : Union[str, Any] = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
snake_case__ : Optional[Any] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
snake_case__ : Tuple = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
snake_case__ : Union[str, Any] = tokenizer(lowerCamelCase , return_tensors='''np''' , truncation=lowerCamelCase , max_length=512 , padding=lowerCamelCase )
snake_case__ : Optional[Any] = model.generate(**lowerCamelCase , num_beams=2 ).sequences
snake_case__ : List[Any] = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
assert tgt_text == decoded
| 694 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
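    # simple interest accrued: principal * daily rate * number of days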
return principal * daily_interest_rate * days_between_payments
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
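    # interest earned after n compounding periods: principal * ((1 + rate) ** n - 1)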
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
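    # an APR compounds daily, so reuse compound interest with a daily rate over 365 * years periods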
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase : Any = logging.get_logger("transformers.models.speecht5")
def _A ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Tuple ):
hf_model.apply_weight_norm()
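    # weight norm must be applied first so the weight_g / weight_v parameters exist to copy into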
snake_case__ : Union[str, Any] = checkpoint['''input_conv.weight_g''']
snake_case__ : Optional[Any] = checkpoint['''input_conv.weight_v''']
snake_case__ : Dict = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
snake_case__ : List[str] = checkpoint[f'''upsamples.{i}.1.weight_g''']
snake_case__ : Tuple = checkpoint[f'''upsamples.{i}.1.weight_v''']
snake_case__ : List[Any] = checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
snake_case__ : Dict = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
snake_case__ : Optional[Any] = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
snake_case__ : Dict = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
snake_case__ : Optional[Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
snake_case__ : Union[str, Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
snake_case__ : List[Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
snake_case__ : Dict = checkpoint['''output_conv.1.weight_g''']
snake_case__ : Union[str, Any] = checkpoint['''output_conv.1.weight_v''']
snake_case__ : Union[str, Any] = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def _A ( snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : str , snake_case__ : Dict=None , snake_case__ : Optional[Any]=None , ):
if config_path is not None:
snake_case__ : Optional[int] = SpeechTaHifiGanConfig.from_pretrained(snake_case__ )
else:
snake_case__ : Optional[int] = SpeechTaHifiGanConfig()
snake_case__ : Dict = SpeechTaHifiGan(snake_case__ )
snake_case__ : Optional[int] = torch.load(snake_case__ )
load_weights(orig_checkpoint['''model''']['''generator'''] , snake_case__ , snake_case__ )
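    # load the feature-normalization statistics (mean and scale) saved alongside the original model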
snake_case__ : Optional[int] = np.load(snake_case__ )
snake_case__ : Optional[int] = stats[0].reshape(-1 )
snake_case__ : Union[str, Any] = stats[1].reshape(-1 )
snake_case__ : List[str] = torch.from_numpy(snake_case__ ).float()
snake_case__ : List[str] = torch.from_numpy(snake_case__ ).float()
model.save_pretrained(snake_case__ )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 694 |
'''simple docstring'''
from math import isqrt
def _A ( snake_case__ : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def _A ( snake_case__ : int = 10**6 ):
snake_case__ : str = 0
snake_case__ : List[str] = 1
snake_case__ : str = 7
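    # 7 == 2**3 - 1**3; adding 6 * cube_index each step yields the next difference
    # of consecutive cubes, (n + 1)**3 - n**3 = 3*n*n + 3*n + 1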
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 1 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowerCAmelCase : int = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase : Any = logging.getLogger()
def _A ( ):
snake_case__ : List[str] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
snake_case__ : Union[str, Any] = parser.parse_args()
return args.f
def _A ( snake_case__ : List[Any] , snake_case__ : Dict="eval" ):
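    # read the metrics file the example script writes for the requested split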
snake_case__ : Any = os.path.join(snake_case__ , f'''{split}_results.json''' )
if os.path.exists(snake_case__ ):
with open(snake_case__ , '''r''' ) as f:
return json.load(snake_case__ )
raise ValueError(f'''can\'t find {path}''' )
_lowerCAmelCase : List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Dict = self.get_auto_remove_tmp_dir()
snake_case__ : Union[str, Any] = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ):
run_flax_glue.main()
snake_case__ : List[Any] = get_results(lowerCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : str = self.get_auto_remove_tmp_dir()
snake_case__ : List[str] = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ):
run_clm_flax.main()
snake_case__ : Optional[int] = get_results(lowerCamelCase )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Any = self.get_auto_remove_tmp_dir()
snake_case__ : int = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ):
run_summarization_flax.main()
snake_case__ : Optional[int] = get_results(lowerCamelCase , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = self.get_auto_remove_tmp_dir()
snake_case__ : Dict = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ):
run_mlm_flax.main()
snake_case__ : Dict = get_results(lowerCamelCase )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Dict = self.get_auto_remove_tmp_dir()
snake_case__ : Any = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ):
run_ta_mlm_flax.main()
snake_case__ : Optional[Any] = get_results(lowerCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Tuple = 7 if get_gpu_count() > 1 else 2
snake_case__ : Optional[Any] = self.get_auto_remove_tmp_dir()
snake_case__ : List[str] = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ):
run_flax_ner.main()
snake_case__ : Optional[Any] = get_results(lowerCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.get_auto_remove_tmp_dir()
snake_case__ : Any = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(lowerCamelCase , '''argv''' , lowerCamelCase ):
run_qa.main()
snake_case__ : Tuple = get_results(lowerCamelCase )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 694 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = fa_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase )
return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> None:
"""simple docstring"""
snake_case__ : Optional[int] = order
# a_{0} ... a_{k}
snake_case__ : Union[str, Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
snake_case__ : Dict = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
snake_case__ : List[str] = [0.0] * self.order
# y[n-1] ... y[n-k]
snake_case__ : Union[str, Any] = [0.0] * self.order
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> None:
"""simple docstring"""
if len(lowerCamelCase ) < self.order:
snake_case__ : Dict = [1.0, *a_coeffs]
if len(lowerCamelCase ) != self.order + 1:
snake_case__ : int = (
f'''Expected a_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(lowerCamelCase )}'''
)
raise ValueError(lowerCamelCase )
if len(lowerCamelCase ) != self.order + 1:
snake_case__ : str = (
f'''Expected b_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(lowerCamelCase )}'''
)
raise ValueError(lowerCamelCase )
snake_case__ : Dict = a_coeffs
snake_case__ : int = b_coeffs
def lowercase__ ( self , lowerCamelCase ) -> float:
"""simple docstring"""
snake_case__ : int = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
snake_case__ : Tuple = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
snake_case__ : int = self.input_history[:-1]
snake_case__ : Optional[int] = self.output_history[:-1]
snake_case__ : int = sample
snake_case__ : Dict = result
return result
| 694 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 42
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
snake_case__ : str = timestep
if not torch.is_tensor(lowerCamelCase ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[Any] = timesteps[None].to(sample.device )
snake_case__ : Any = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
snake_case__ : Tuple = self.time_mlp(lowerCamelCase )
else:
snake_case__ : Union[str, Any] = timestep_embed[..., None]
snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case__ : List[Any] = ()
for downsample_block in self.down_blocks:
snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
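            # peel off the skip connections that pair with this up block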
snake_case__ : str = down_block_res_samples[-1:]
snake_case__ : int = down_block_res_samples[:-1]
snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase )
| 694 | 1 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (KDPMaDiscreteScheduler,)
_lowerCAmelCase = 1_0
def lowercase__ ( self , **lowerCamelCase ) -> Any:
"""simple docstring"""
snake_case__ : int = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase )
return config
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase__ ( self ) -> int:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : int = self.scheduler_classes[0]
snake_case__ : Dict = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Union[str, Any] = self.dummy_model()
snake_case__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : Any = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Optional[Any] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : Dict = output.prev_sample
snake_case__ : Optional[int] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2
assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
if torch_device == "mps":
return
snake_case__ : Union[str, Any] = self.scheduler_classes[0]
snake_case__ : List[Any] = self.get_scheduler_config()
snake_case__ : Any = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : List[str] = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : str = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Optional[Any] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Any = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Any = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = output.prev_sample
snake_case__ : List[Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Dict = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
if torch_device == "mps":
return
snake_case__ : List[str] = self.scheduler_classes[0]
snake_case__ : List[str] = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : List[str] = self.dummy_model()
snake_case__ : Dict = self.dummy_sample_deter.to(lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
snake_case__ : Optional[int] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Any = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : List[Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase ) )
if str(lowerCamelCase ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
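# Hedged usage sketch mirroring the loops exercised by the tests above (assumes the
# public diffusers API of KDPM2DiscreteScheduler; `model` is a placeholder callable):
#
#     scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = model(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample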
| 694 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _A ( snake_case__ : str , snake_case__ : str ):
snake_case__ : Tuple = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
snake_case__ : List[str] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
snake_case__ : str = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
snake_case__ : Any = os.path.join(get_home_dir() , '''models''' )
snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
snake_case__ : Optional[int] = nlp.model.BERTModel(
snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
snake_case__ : Any = original_bort._collect_params_with_prefix()
# Build our config 🤗
snake_case__ : Union[str, Any] = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(snake_case__ ),
}
snake_case__ : Dict = BertConfig.from_dict(snake_case__ )
snake_case__ : Dict = BertForMaskedLM(snake_case__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(snake_case__ : str ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ):
snake_case__ : Union[str, Any] = hf_param.shape
snake_case__ : Any = to_torch(params[gluon_param] )
snake_case__ : Dict = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
snake_case__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
snake_case__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
snake_case__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
snake_case__ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
snake_case__ : str = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
snake_case__ : BertSelfAttention = layer.attention.self
snake_case__ : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
snake_case__ : Dict = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
snake_case__ : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
snake_case__ : int = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
snake_case__ : List[Any] = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
snake_case__ : List[Any] = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
snake_case__ : BertSelfOutput = layer.attention.output
snake_case__ : Optional[Any] = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
snake_case__ : List[str] = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
snake_case__ : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
snake_case__ : Any = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
snake_case__ : BertIntermediate = layer.intermediate
snake_case__ : int = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
snake_case__ : Optional[int] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
snake_case__ : BertOutput = layer.output
snake_case__ : Any = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
snake_case__ : Tuple = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
snake_case__ : Tuple = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
snake_case__ : Union[str, Any] = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' )
snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids''']
# Get gluon output
snake_case__ : List[str] = mx.nd.array([input_ids] )
snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(snake_case__ )
snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ )
hf_bort_model.eval()
snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' )
snake_case__ : str = hf_bort_model(**snake_case__ )[0]
snake_case__ : str = output_gluon[0].asnumpy()
snake_case__ : str = output_hf[0].detach().numpy()
snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item()
snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )
if success:
        print('''✔️ Both models output the same tensors''' )
else:
        print('''❌ Both models do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
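# Hedged usage note (the script name and paths below are placeholders): the converter
# is invoked from the command line, e.g.
#   python <this_script>.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path /path/to/output_dir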
| 694 | 1 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=64 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , lowerCamelCase=2 , lowerCamelCase=2 , lowerCamelCase=2 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=1 , ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Any = seq_length
snake_case__ : str = is_training
snake_case__ : Optional[Any] = use_input_mask
snake_case__ : List[str] = use_token_type_ids
snake_case__ : Union[str, Any] = use_labels
snake_case__ : Optional[int] = vocab_size
snake_case__ : Any = hidden_size
snake_case__ : str = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : str = intermediate_size
snake_case__ : Dict = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : Union[str, Any] = type_vocab_size
snake_case__ : int = type_sequence_label_size
snake_case__ : str = initializer_range
snake_case__ : str = num_labels
snake_case__ : Optional[Any] = num_choices
snake_case__ : Optional[int] = scope
snake_case__ : Dict = q_groups
snake_case__ : List[Any] = k_groups
snake_case__ : List[Any] = v_groups
snake_case__ : int = post_attention_groups
snake_case__ : Optional[int] = intermediate_groups
snake_case__ : Any = output_groups
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : List[str] = None
if self.use_input_mask:
snake_case__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Any = None
snake_case__ : Tuple = None
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Tuple = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[str] = SqueezeBertModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Dict = model(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int = SqueezeBertForMaskedLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Dict = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Tuple = SqueezeBertForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : List[Any] = model(
lowerCamelCase , attention_mask=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = self.num_labels
snake_case__ : Tuple = SqueezeBertForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : int = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Tuple = self.num_labels
snake_case__ : List[Any] = SqueezeBertForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : List[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.num_choices
snake_case__ : str = SqueezeBertForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : List[Any] = model(
lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
((snake_case__) ,(snake_case__) ,(snake_case__) ,(snake_case__) ,(snake_case__) ,(snake_case__)) : Dict = config_and_inputs
snake_case__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_lowerCAmelCase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = False
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[Any] = SqueezeBertModelTester(self )
snake_case__ : Tuple = ConfigTester(self , config_class=lowerCamelCase , dim=37 )
def lowercase__ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase )
@slow
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[Any] = SqueezeBertModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
snake_case__ : int = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
snake_case__ : str = model(lowerCamelCase )[0]
snake_case__ : Dict = torch.Size((1, 3) )
self.assertEqual(output.shape , lowerCamelCase )
snake_case__ : Optional[Any] = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-4 ) )
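# Hedged inference sketch mirroring the integration test above (assumes network
# access to the 'squeezebert/squeezebert-mnli' checkpoint and its tokenizer):
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
#     model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
#     inputs = tok("A soccer game.", "A sports match.", return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 3): the three MNLI classes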
| 694 |
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
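# Alternative sketch using the identity E(k) = 4*E(k-1) + E(k-2) for the even
# Fibonacci terms (every third Fibonacci number is even), which avoids the
# parity test used above.
def solution_even_terms_only(n: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total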
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    """Banker's algorithm: decide whether a resource-allocation state is safe."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum of currently allocated resources, per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: the claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process index to its need vector."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety algorithm, executing any process whose need can be met."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation and claim tables plus current resource usage."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
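# Usage sketch: run the safety check on the module-level test tables defined above;
# passing describe=True also prints the allocation and claim tables first.
def demo_bankers() -> None:
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)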
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : int = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
_lowerCAmelCase : Optional[int] = {
"google/pegasus-xsum": 5_1_2,
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PegasusTokenizer
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is'''
f''' {type(lowerCamelCase )}''' )
snake_case__ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case__ : List[Any] = additional_special_tokens_extended
else:
snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Union[str, Any] = vocab_file
snake_case__ : List[Any] = False if not self.vocab_file else True
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : int = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
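# Hedged usage sketch (PegasusTokenizerFast is the real transformers name for the
# class defined above; assumes the 'google/pegasus-xsum' checkpoint is reachable):
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("Summarize this article.").input_ids
#     # the EOS id is appended, per build_inputs_with_special_tokens above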
| 694 | 1 |
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Approximate the dominant eigenvalue/eigenvector pair of input_matrix by
    repeated multiplication and normalization (power iteration)."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
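# Quick usage sketch (illustrative): the dominant eigenpair of a 2x2 symmetric
# matrix; [[2, 1], [1, 2]] has eigenvalues 1 and 3, so power iteration returns ~3.
def demo_power_iteration() -> None:
    lam, vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
    assert abs(lam - 3.0) <= 1e-6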
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = 1.0 if scale is None else scale
snake_case__ : Dict = 0.0 if loc is None else loc
super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] )
@property
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
return self.variance.sqrt()
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = args_dim
snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] )
snake_case__ : Optional[int] = domain_map
def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]:
"""simple docstring"""
snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj]
return self.domain_map(*lowerCamelCase )
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Tuple = function
def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.function(lowerCamelCase , *lowerCamelCase )
class snake_case :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
def __init__( self , lowerCamelCase = 1 ) -> None:
"""simple docstring"""
snake_case__ : Optional[Any] = dim
snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*lowerCamelCase )
else:
return Independent(self.distribution_class(*lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution:
"""simple docstring"""
snake_case__ : List[Any] = self._base_distribution(lowerCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim )
@property
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return len(self.event_shape )
@property
def lowercase__ ( self ) -> float:
"""simple docstring"""
return 0.0
def lowercase__ ( self , lowerCamelCase ) -> nn.Module:
"""simple docstring"""
return ParameterProjection(
in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowercase__ ( self , *lowerCamelCase ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def lowercase__ ( lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
_lowerCAmelCase = StudentT
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"loc": 1, "scale": 1}
_lowerCAmelCase = Normal
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"total_count": 1, "logits": 1}
_lowerCAmelCase = NegativeBinomial
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowercase__ ( self , lowerCamelCase ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : str = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
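# Standalone check (illustrative) of the squareplus map defined above:
# f(x) = (x + sqrt(x^2 + 4)) / 2 is smooth and strictly positive, which is why it is
# used to constrain scale/df/total_count parameters to valid ranges.
def _squareplus_demo() -> None:
    x = torch.linspace(-5.0, 5.0, steps=11)
    y = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
    assert bool((y > 0).all())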
| 694 | 1 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
_lowerCAmelCase : Any = logging.getLogger(__name__)
def _A ( snake_case__ : str ):
snake_case__ : Tuple = git.Repo(search_parent_directories=snake_case__ )
snake_case__ : Tuple = {
'''repo_id''': str(snake_case__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(snake_case__ , '''git_log.json''' ) , '''w''' ) as f:
json.dump(snake_case__ , snake_case__ , indent=4 )
def _A ( snake_case__ : Dict ):
if params.n_gpu <= 0:
snake_case__ : List[str] = 0
snake_case__ : Optional[Any] = -1
snake_case__ : List[Any] = True
snake_case__ : Tuple = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
snake_case__ : Union[str, Any] = int(os.environ['''WORLD_SIZE'''] )
snake_case__ : Union[str, Any] = int(os.environ['''N_GPU_NODE'''] )
snake_case__ : List[Any] = int(os.environ['''RANK'''] )
# number of nodes / node ID
snake_case__ : Optional[Any] = params.world_size // params.n_gpu_per_node
snake_case__ : Dict = params.global_rank // params.n_gpu_per_node
snake_case__ : int = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case__ : List[Any] = 1
snake_case__ : Tuple = 0
snake_case__ : str = 0
snake_case__ : str = 0
snake_case__ : Union[str, Any] = 1
snake_case__ : Optional[int] = 1
snake_case__ : Tuple = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case__ : Tuple = params.node_id == 0 and params.local_rank == 0
snake_case__ : Optional[Any] = params.n_nodes > 1
# summary
snake_case__ : Optional[Any] = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def _A ( snake_case__ : List[str] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
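# Tiny sanity sketch (illustrative) of the rank arithmetic used in the GPU init
# above: with WORLD_SIZE=8 and N_GPU_NODE=4, global rank 6 belongs to node 1.
def _rank_arithmetic_demo() -> None:
    world_size, n_gpu_per_node, global_rank = 8, 4, 6
    assert world_size // n_gpu_per_node == 2  # n_nodes
    assert global_rank // n_gpu_per_node == 1  # node_id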
| 694 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Count lattice paths through an n x n grid: the central binomial
    coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
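# Cross-check sketch: the closed form above equals math.comb(2n, n) directly
# (Python 3.8+); for n = 20 both give 137846528820.
from math import comb

assert solution(20) == comb(40, 20) == 137_846_528_820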
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 1 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to the integer square root of ``number``."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime of the form (k + 1)**3 - k**3 = 3k^2 + 3k + 1."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
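# Identity check (illustrative): successive cube differences (k+1)^3 - k^3 grow by
# exactly 6(k+1), which is the increment used in the loop above.
assert all((k + 1) ** 3 - k**3 == 3 * k * k + 3 * k + 1 for k in range(1, 100))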
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (EulerDiscreteScheduler,)
_lowerCAmelCase = 1_0
def lowercase__ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase )
return config
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Any = self.get_scheduler_config()
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Any = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : int = model(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : Optional[int] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Union[str, Any] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Tuple = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : int = output.prev_sample
snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Dict = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Optional[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
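# Minimal runnable sketch (assumes the diffusers version imported above): a
# zero-noise-prediction denoising loop over 10 Euler steps, seeded so the
# stochastic step is reproducible, as in the tests above.
def _euler_demo() -> torch.Tensor:
    sched = EulerDiscreteScheduler(num_train_timesteps=1000)
    sched.set_timesteps(10)
    g = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8, generator=g) * sched.init_noise_sigma
    for t in sched.timesteps:
        inp = sched.scale_model_input(sample, t)
        sample = sched.step(torch.zeros_like(inp), t, sample, generator=g).prev_sample
    return sample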
| 694 | 1 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[int] = 0
@slow
def lowercase__ ( self ) -> int:
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
snake_case__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowerCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowerCamelCase ) , 0 )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
# Check that tokenizer_type ≠ model_type
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase , config=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase , '''vocab.txt''' ) )
snake_case__ : int = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''bert''' , use_fast=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase , '''merges.txt''' ) )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@require_tokenizers
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase , '''vocab.txt''' ) )
snake_case__ : str = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase , '''merges.txt''' ) )
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
with pytest.raises(lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase__ ( self ) -> Any:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
snake_case__ : List[str] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCamelCase , lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase__ ( self ) -> Any:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
snake_case__ : Tuple = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : List[Any] = TOKENIZER_MAPPING.values()
snake_case__ : Dict = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCamelCase )
@require_tokenizers
def lowercase__ ( self ) -> Any:
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase ) , lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , lowerCamelCase )
@require_tokenizers
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Any = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=lowerCamelCase )
snake_case__ : List[Any] = '''Hello, world. How are you?'''
snake_case__ : Dict = tokenizer.tokenize(lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
snake_case__ : Dict = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=lowerCamelCase )
snake_case__ : int = tokenizer.tokenize(lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : str = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = get_tokenizer_config('''bert-base-cased''' )
snake_case__ : Dict = config.pop('''_commit_hash''' , lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
snake_case__ : int = get_tokenizer_config(lowerCamelCase )
self.assertDictEqual(lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : Dict = get_tokenizer_config(lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , lowerCamelCase )
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
snake_case__ : Dict = CustomTokenizer.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : str = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCamelCase , slow_tokenizer_class=lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoTokenizer.register(lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new
            # tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Tuple = BertTokenizerFast.from_pretrained(lowerCamelCase )
bert_tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : List[Any] = CustomTokenizerFast.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase , use_fast=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(lowerCamelCase ):
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase ):
snake_case__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
snake_case__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase , trust_remote_code=lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
snake_case__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = False
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = NewTokenizer
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , lowerCamelCase )
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
AutoTokenizer.register(lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
# If remote code is not set, the default is to use local
snake_case__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
snake_case__ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
snake_case__ : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
snake_case__ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
snake_case__ : Dict = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase__ ( self ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase , revision='''aaaaaa''' )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 694 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = ['pixel_values']
def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : int = do_resize
snake_case__ : Dict = do_rescale
snake_case__ : Any = size_divisor
snake_case__ : str = resample
super().__init__(**lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
snake_case__ : Any = height // size_divisor * size_divisor
snake_case__ : Union[str, Any] = width // size_divisor * size_divisor
snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
return image
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature:
"""simple docstring"""
snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor
snake_case__ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images]
if do_resize:
snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
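            # scale=1/255 maps 8-bit pixel values from [0, 255] into [0, 1]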
snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images]
snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
snake_case__ : str = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 694 | 1 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
_lowerCAmelCase : Dict = 2_9_9_7_9_2_4_5_8
# Symbols
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = symbols("ct x y z")
def _A ( snake_case__ : float ):
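    # beta = v / c: the velocity expressed as a fraction of the speed of light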
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def _A ( snake_case__ : float ):
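    # Lorentz factor: gamma = 1 / sqrt(1 - beta^2); it diverges as v approaches c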
return 1 / sqrt(1 - beta(snake_case__ ) ** 2 )
def _A ( snake_case__ : float ):
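    # 4x4 Lorentz boost along the x-axis: it mixes the ct and x coordinates and leaves y and z unchanged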
return np.array(
[
[gamma(snake_case__ ), -gamma(snake_case__ ) * beta(snake_case__ ), 0, 0],
[-gamma(snake_case__ ) * beta(snake_case__ ), gamma(snake_case__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def _A ( snake_case__ : float , snake_case__ : np.ndarray | None = None ):
    # Build the symbolic four-vector when no concrete event is supplied
if event is None:
snake_case__ : str = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(snake_case__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
_lowerCAmelCase : Optional[int] = transform(2_9_9_7_9_2_4_5)
print("Example of four vector: ")
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
_lowerCAmelCase : Any = {ct: c, x: 1, y: 1, z: 1}
_lowerCAmelCase : Dict = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
| 694 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
| 694 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def lowercase__ ( *lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : str = ObjectDetectionPipeline(model=lowerCamelCase , image_processor=lowerCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Optional[Any] = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(lowerCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase , {
'''score''': ANY(lowerCamelCase ),
'''label''': ANY(lowerCamelCase ),
'''box''': {'''xmin''': ANY(lowerCamelCase ), '''ymin''': ANY(lowerCamelCase ), '''xmax''': ANY(lowerCamelCase ), '''ymax''': ANY(lowerCamelCase )},
} , )
import datasets
snake_case__ : Any = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
snake_case__ : Optional[int] = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
snake_case__ : int = object_detector(lowerCamelCase , threshold=0.0 )
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(lowerCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase , {
'''score''': ANY(lowerCamelCase ),
'''label''': ANY(lowerCamelCase ),
'''box''': {'''xmin''': ANY(lowerCamelCase ), '''ymin''': ANY(lowerCamelCase ), '''xmax''': ANY(lowerCamelCase ), '''ymax''': ANY(lowerCamelCase )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
pass
@require_torch
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
snake_case__ : List[Any] = AutoModelForObjectDetection.from_pretrained(lowerCamelCase )
snake_case__ : str = AutoFeatureExtractor.from_pretrained(lowerCamelCase )
snake_case__ : str = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase )
snake_case__ : Optional[int] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
] , )
snake_case__ : str = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
] , )
@require_torch
@slow
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int = '''facebook/detr-resnet-50'''
snake_case__ : int = AutoModelForObjectDetection.from_pretrained(lowerCamelCase )
snake_case__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase )
snake_case__ : List[Any] = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase )
snake_case__ : List[str] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
snake_case__ : List[Any] = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Union[str, Any] = '''facebook/detr-resnet-50'''
snake_case__ : int = pipeline('''object-detection''' , model=lowerCamelCase )
snake_case__ : int = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
snake_case__ : Dict = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : str = 0.9_985
snake_case__ : Optional[Any] = '''facebook/detr-resnet-50'''
snake_case__ : Optional[int] = pipeline('''object-detection''' , model=lowerCamelCase )
snake_case__ : Union[str, Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=lowerCamelCase )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int = '''Narsil/layoutlmv3-finetuned-funsd'''
snake_case__ : Tuple = 0.9_993
snake_case__ : Optional[int] = pipeline('''object-detection''' , model=lowerCamelCase , threshold=lowerCamelCase )
snake_case__ : str = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
] , )
| 694 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
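    # Power relation P = V * I: exactly one of (voltage, current, power) is passed as 0 and solved for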
snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : list , snake_case__ : list ):
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(a - b ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def _A ( snake_case__ : list[float] ):
if point:
if isinstance(snake_case__ , snake_case__ ):
for item in point:
if not isinstance(snake_case__ , (int, float) ):
snake_case__ : List[Any] = (
'''Expected a list of numbers as input, found '''
f'''{type(snake_case__ ).__name__}'''
)
raise TypeError(snake_case__ )
else:
snake_case__ : Optional[int] = f'''Expected a list of numbers as input, found {type(snake_case__ ).__name__}'''
raise TypeError(snake_case__ )
else:
raise ValueError('''Missing an input''' )
def _A ( snake_case__ : list , snake_case__ : list ):
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(x - y ) for x, y in zip(snake_case__ , snake_case__ ) ) )
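# For example, the Manhattan distance between [1, 1] and [9, 9] is |1 - 9| + |1 - 9| = 16.0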
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
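# get_imports ignores imports wrapped in try/except (they are treated as optional dependencies),
# so every case above should resolve to just ["os"].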
@pytest.mark.parametrize('''case''' , snake_case__ )
def _A ( snake_case__ : List[str] , snake_case__ : Dict ):
snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' )
with open(snake_case__ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case__ )
snake_case__ : int = get_imports(snake_case__ )
assert parsed_imports == ["os"]
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : list[list[int]] ):
snake_case__ : Dict = len(snake_case__ )
    # Create a solutions matrix to record the path taken through the maze.
snake_case__ : Any = [[0 for _ in range(snake_case__ )] for _ in range(snake_case__ )]
snake_case__ : Dict = run_maze(snake_case__ , 0 , 0 , snake_case__ )
if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _A ( snake_case__ : list[list[int]] , snake_case__ : int , snake_case__ : int , snake_case__ : list[list[int]] ):
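    # Depth-first backtracking step: mark the current cell, recurse into the four
    # neighbours, and unmark the cell if none of them reaches the exit.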
snake_case__ : Union[str, Any] = len(snake_case__ )
    # Base case: the bottom-right exit cell has been reached.
if i == j == (size - 1):
snake_case__ : Tuple = 1
return True
snake_case__ : Any = (not i < 0) and (not j < 0) # Check lower bounds
snake_case__ : Tuple = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
snake_case__ : str = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
snake_case__ : List[str] = 1
# check for directions
if (
run_maze(snake_case__ , i + 1 , snake_case__ , snake_case__ )
or run_maze(snake_case__ , snake_case__ , j + 1 , snake_case__ )
or run_maze(snake_case__ , i - 1 , snake_case__ , snake_case__ )
or run_maze(snake_case__ , snake_case__ , j - 1 , snake_case__ )
):
return True
snake_case__ : Tuple = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'markuplm'
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Optional[int] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : List[str] = initializer_range
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Optional[Any] = position_embedding_type
snake_case__ : Dict = use_cache
snake_case__ : int = classifier_dropout
# additional properties
snake_case__ : Union[str, Any] = max_depth
snake_case__ : Dict = max_xpath_tag_unit_embeddings
snake_case__ : Any = max_xpath_subs_unit_embeddings
snake_case__ : int = tag_pad_id
snake_case__ : Tuple = subs_pad_id
snake_case__ : Dict = xpath_unit_hidden_size
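        # The xpath tag/subscript vocabularies above let MarkupLM embed each token's
        # position in the DOM tree, with the tree depth capped at max_depth.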
| 694 | 1 |
'''simple docstring'''
def _A ( ):
snake_case__ : Optional[Any] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
snake_case__ : Tuple = 6
snake_case__ : str = 1
snake_case__ : str = 19_01
snake_case__ : Tuple = 0
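    # 1 Jan 1901 was a Tuesday, so the century's first Sunday is 6 Jan 1901 (hence day starts at 6);
    # stepping a week at a time, count the Sundays that land on the 1st of a month.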
while year < 20_01:
day += 7
if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
snake_case__ : List[str] = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
snake_case__ : Optional[Any] = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
snake_case__ : int = day - days_per_month[month - 2]
if month > 12:
year += 1
snake_case__ : int = 1
if year < 20_01 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 694 |
'''simple docstring'''
def _A ( snake_case__ : float ):
return 10 - x * x
def _A ( snake_case__ : float , snake_case__ : float ):
    # Bolzano's theorem: a root exists in [a, b] only if the function changes sign between a and b
if equation(snake_case__ ) * equation(snake_case__ ) >= 0:
raise ValueError('''Wrong space!''' )
snake_case__ : List[str] = a
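    # Repeatedly halve the bracketing interval until it is narrower than the 0.01 tolerance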
while (b - a) >= 0.01:
# Find middle point
snake_case__ : Optional[int] = (a + b) / 2
# Check if middle point is root
if equation(snake_case__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case__ ) * equation(snake_case__ ) < 0:
snake_case__ : Dict = c
else:
snake_case__ : List[str] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 694 | 1 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 694 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : list[float] , snake_case__ : list[float] ):
snake_case__ : Dict = sorted(numsa + numsa )
snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : str = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
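    # _LazyModule defers the torch-backed submodule imports until an attribute is first accessed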
| 694 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
_lowerCAmelCase : Optional[int] = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_lowerCAmelCase : Dict = "UperNetConfig"
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 , lowerCamelCase = False , lowerCamelCase = 1 , ) -> None:
"""simple docstring"""
super().__init__()
snake_case__ : List[Any] = nn.Convad(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , kernel_size=lowerCamelCase , padding=lowerCamelCase , bias=lowerCamelCase , dilation=lowerCamelCase , )
snake_case__ : Union[str, Any] = nn.BatchNormad(lowerCamelCase )
snake_case__ : int = nn.ReLU()
def lowercase__ ( self , lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
snake_case__ : Optional[Any] = self.conv(lowerCamelCase )
snake_case__ : Optional[int] = self.batch_norm(lowerCamelCase )
snake_case__ : Dict = self.activation(lowerCamelCase )
return output
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> None:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = [
nn.AdaptiveAvgPoolad(lowerCamelCase ),
UperNetConvModule(lowerCamelCase , lowerCamelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(lowerCamelCase ) , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
snake_case__ : Dict = input
for layer in self.layers:
snake_case__ : List[Any] = layer(lowerCamelCase )
return hidden_state
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> None:
"""simple docstring"""
super().__init__()
snake_case__ : int = pool_scales
snake_case__ : Optional[Any] = align_corners
snake_case__ : Dict = in_channels
snake_case__ : int = channels
snake_case__ : Union[str, Any] = []
for i, pool_scale in enumerate(lowerCamelCase ):
snake_case__ : List[Any] = UperNetPyramidPoolingBlock(pool_scale=lowerCamelCase , in_channels=lowerCamelCase , channels=lowerCamelCase )
self.blocks.append(lowerCamelCase )
self.add_module(str(lowerCamelCase ) , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> List[torch.Tensor]:
"""simple docstring"""
snake_case__ : str = []
for ppm in self.blocks:
snake_case__ : Any = ppm(lowerCamelCase )
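            # Upsample the pooled map back to the input's spatial size so all branches can be concatenated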
snake_case__ : Tuple = nn.functional.interpolate(
lowerCamelCase , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(lowerCamelCase )
return ppm_outs
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
snake_case__ : List[Any] = config
snake_case__ : Optional[int] = config.pool_scales # e.g. (1, 2, 3, 6)
snake_case__ : Any = in_channels
snake_case__ : Union[str, Any] = config.hidden_size
snake_case__ : Union[str, Any] = False
snake_case__ : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
snake_case__ : List[str] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
snake_case__ : Any = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
snake_case__ : str = nn.ModuleList()
snake_case__ : str = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
snake_case__ : Optional[Any] = UperNetConvModule(lowerCamelCase , self.channels , kernel_size=1 )
snake_case__ : List[str] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(lowerCamelCase )
self.fpn_convs.append(lowerCamelCase )
snake_case__ : Union[str, Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
self.apply(self._init_weights )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowerCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : Tuple = inputs[-1]
snake_case__ : str = [x]
psp_outs.extend(self.psp_modules(lowerCamelCase ) )
snake_case__ : int = torch.cat(lowerCamelCase , dim=1 )
snake_case__ : Union[str, Any] = self.bottleneck(lowerCamelCase )
return output
def lowercase__ ( self , lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
snake_case__ : Dict = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(lowerCamelCase ) )
# build top-down path
snake_case__ : int = len(lowerCamelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
snake_case__ : int = laterals[i - 1].shape[2:]
snake_case__ : List[Any] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=lowerCamelCase , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
snake_case__ : Tuple = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
snake_case__ : Union[str, Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
snake_case__ : Optional[Any] = torch.cat(lowerCamelCase , dim=1 )
snake_case__ : Union[str, Any] = self.fpn_bottleneck(lowerCamelCase )
snake_case__ : Optional[int] = self.classifier(lowerCamelCase )
return output
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase = 2 , lowerCamelCase = 3 , lowerCamelCase = 1 ) -> None:
"""simple docstring"""
super().__init__()
snake_case__ : int = config
snake_case__ : int = config.auxiliary_in_channels
snake_case__ : Optional[Any] = config.auxiliary_channels
snake_case__ : Union[str, Any] = config.auxiliary_num_convs
snake_case__ : Union[str, Any] = config.auxiliary_concat_input
snake_case__ : str = in_index
snake_case__ : Dict = (kernel_size // 2) * dilation
snake_case__ : List[str] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
if self.num_convs == 0:
snake_case__ : Tuple = nn.Identity()
else:
snake_case__ : Dict = nn.Sequential(*lowerCamelCase )
if self.concat_input:
snake_case__ : str = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=lowerCamelCase , padding=kernel_size // 2 )
snake_case__ : List[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
self.apply(self._init_weights )
def lowercase__ ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
if isinstance(lowerCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase__ ( self , lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
snake_case__ : Any = encoder_hidden_states[self.in_index]
snake_case__ : Optional[Any] = self.convs(lowerCamelCase )
if self.concat_input:
snake_case__ : List[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
snake_case__ : Optional[Any] = self.classifier(lowerCamelCase )
return output
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = UperNetConfig
_lowerCAmelCase = 'pixel_values'
_lowerCAmelCase = True
def lowercase__ ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
if isinstance(lowerCamelCase , lowerCamelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=False ) -> str:
"""simple docstring"""
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : List[Any] = value
UPERNET_START_DOCSTRING = R"\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UPERNET_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation( UperNetPreTrainedModel ):
    """simple docstring"""
    def __init__( self , config ) -> Optional[int]:
        """simple docstring"""
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values = None , labels = None , output_attentions = None , output_hidden_states = None , return_dict = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
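# Hedged usage sketch (commented out to keep the module import-safe): running
# semantic segmentation with the upstream `transformers` UperNet API this file
# mirrors. The checkpoint name and image path are illustrative assumptions.
#
# from PIL import Image
# from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
# processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
# model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
# inputs = processor(images=Image.open("scene.png"), return_tensors="pt")
# outputs = model(**inputs)
# print(outputs.logits.shape)  # (batch, num_labels, height, width)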
| 694 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'encoder-decoder'
    is_composition = True
    def __init__( self , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        """simple docstring"""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ) -> Union[str, Any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
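# Hedged usage sketch (commented out; an illustration, not part of the config
# module): composing this config from two sub-configs via the classmethod above,
# using the upstream `transformers` names this file corresponds to.
#
# from transformers import BertConfig, EncoderDecoderConfig
#
# encoder_config = BertConfig()
# decoder_config = BertConfig()  # gets is_decoder / add_cross_attention set for it
# config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# assert config.decoder.is_decoder and config.decoder.add_cross_attention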
| 694 | 1 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed( TransformedDistribution ):
    """simple docstring"""
    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0 ) -> Tuple:
        """simple docstring"""
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ) -> Dict:
        """simple docstring"""
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ) -> Optional[Any]:
        """simple docstring"""
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ) -> List[str]:
        """simple docstring"""
        return self.variance.sqrt()
class ParameterProjection( nn.Module ):
    """simple docstring"""
    def __init__( self , in_features , args_dim , domain_map , **kwargs ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x ) -> Tuple[torch.Tensor]:
        """simple docstring"""
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer( nn.Module ):
    """simple docstring"""
    def __init__( self , function ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__()
        self.function = function
    def forward( self , x , *args ) -> Union[str, Any]:
        """simple docstring"""
        return self.function(x , *args )
class DistributionOutput:
    """simple docstring"""
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim = 1 ) -> None:
        """simple docstring"""
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ) -> int:
        """simple docstring"""
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc = None , scale = None , ) -> Distribution:
        """simple docstring"""
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ) -> Tuple:
        """simple docstring"""
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ) -> int:
        """simple docstring"""
        return len(self.event_shape )
    @property
    def value_in_support( self ) -> float:
        """simple docstring"""
        return 0.0
    def get_parameter_projection( self , in_features ) -> nn.Module:
        """simple docstring"""
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args ) -> Any:
        """simple docstring"""
        raise NotImplementedError()
    @staticmethod
    def squareplus( x ) -> torch.Tensor:
        """simple docstring"""
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput( DistributionOutput ):
    """simple docstring"""
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map( cls , df , loc , scale ) -> int:
        """simple docstring"""
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput( DistributionOutput ):
    """simple docstring"""
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map( cls , loc , scale ) -> Optional[int]:
        """simple docstring"""
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput( DistributionOutput ):
    """simple docstring"""
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count , logits ) -> Dict:
        """simple docstring"""
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args ) -> Distribution:
        """simple docstring"""
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc = None , scale = None ) -> Distribution:
        """simple docstring"""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
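# Hedged sketch (commented out): the affine rescaling the wrapper above performs,
# written directly with torch.distributions; the loc/scale values are illustrative.
#
# import torch
# from torch.distributions import AffineTransform, Normal, TransformedDistribution
#
# base = Normal(torch.zeros(3), torch.ones(3))
# scaled = TransformedDistribution(base, [AffineTransform(loc=2.0, scale=5.0)])
# sample = scaled.sample()        # base sample mapped through x -> 2 + 5 * x
# print(scaled.log_prob(sample))  # the log|det| correction is applied automatically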
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1_0_2_4}
class BartphoTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ) -> List[Any]:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) -> Union[str, Any]:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ) -> Optional[int]:
        """simple docstring"""
        return len(self.fairseq_ids_to_tokens )
    def get_vocab( self ) -> str:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> Optional[int]:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens ) -> List[str]:
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(token )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
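# Hedged usage sketch (commented out; requires `sentencepiece`): loading the
# published checkpoint this tokenizer targets. The sample sentence is an
# illustrative assumption.
#
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
# ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
# print(tokenizer.convert_ids_to_tokens(ids))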
| 694 | 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """simple docstring"""
    def __init__( self , device = "cpu" , clip_model = "openai/clip-vit-large-patch14" ) -> None:
        """simple docstring"""
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )
    def preprocess_img( self , images ) -> Optional[Any]:
        """simple docstring"""
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__( self , text=None , images=None , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        encoding = self.tokenizer(text=text , **kwargs )
        encoding['''pixel_values'''] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP( nn.Module ):
    """simple docstring"""
    def __init__( self , iterations=10 , lr=0.01 , vqgan=None , vqgan_config=None , vqgan_checkpoint=None , clip=None , clip_preprocessor=None , device=None , log=False , save_vector=True , return_val="image" , quantize=True , save_intermediate=False , show_intermediate=False , make_grid=False , ) -> None:
        """simple docstring"""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def create_animation( self , input_path=None , output_path=None , total_duration=5 , extend_frames=True ) -> Tuple:
        """simple docstring"""
        images = []
        if output_path is None:
            output_path = '''./animation.gif'''
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '''/*''' ) )
        if not len(paths ):
            raise ValueError(
                '''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
                ''' function?)''' )
        if len(paths ) == 1:
            print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('''.png''' ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path , images , duration=durations )
        print(f'''gif saved to {output_path}''' )
    def _get_latent( self , path=None , img=None ) -> Any:
        """simple docstring"""
        if not (path or img):
            raise ValueError('''Input either path or tensor''' )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) , target_image_size=256 ).to(self.device )
        img = preprocess_vqgan(img )
        z ,*_ = self.vqgan.encode(img )
        return z
    def _add_vector( self , transform_vector ) -> int:
        """simple docstring"""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q ,*_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self , prompts , image , weights=None ) -> str:
        """simple docstring"""
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors='''pt''' , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self , pos_prompts , neg_prompts , image ) -> Tuple:
        """simple docstring"""
        pos_logits = self._get_clip_similarity(pos_prompts['''prompts'''] , image , weights=(1 / pos_prompts['''weights''']) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['''prompts'''] , image , weights=neg_prompts['''weights'''] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self , original_img , pos_prompts , neg_prompts ) -> str:
        """simple docstring"""
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
        optim = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
            print('''CLIP loss''' , clip_loss )
            if self.log:
                wandb.log({'''CLIP Loss''': clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging( self , positive_prompts , negative_prompts , image_path ) -> Optional[int]:
        """simple docstring"""
        wandb.init(reinit=True , project='''face-editor''' )
        wandb.config.update({'''Positive Prompts''': positive_prompts} )
        wandb.config.update({'''Negative Prompts''': negative_prompts} )
        wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            wandb.log('''Original Image''' , wandb.Image(image ) )
    def process_prompts( self , prompts ) -> Optional[Any]:
        """simple docstring"""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split('''|''' )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt ,weight = prompt.split(''':''' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }
    def generate( self , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ) -> Optional[int]:
        """simple docstring"""
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + '''_''' + get_timestamp()
                os.makedirs(save_path )
        self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('''Original Image''' )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
            if self.log:
                wandb.log({'''Image''': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
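# Hedged usage sketch (commented out): the intended entry point, assuming local
# VQGAN weights and the helper modules imported above are available. The prompts
# and paths are illustrative assumptions.
#
# editor = VQGAN_CLIP(iterations=30, lr=0.05, vqgan_config="configs/vqgan.yaml",
#                     vqgan_checkpoint="checkpoints/vqgan.ckpt")
# editor.generate("a smiling face | blond hair:0.5", neg_prompts="glasses",
#                 image_path="face.png", save_intermediate=True)
# editor.create_animation(output_path="edit.gif")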
| 694 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 6_4, 6_4)
timestep = torch.rand(1) * 9_9_9
encoder_hidden_status = torch.randn(2, 7_7, 7_6_8)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 6_6_6
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 1 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCAmelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase )
class ImageToTextPipeline( Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ) -> Any:
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ) -> str:
        """simple docstring"""
        preprocess_params = {}
        forward_kwargs = {}
        if prompt is not None:
            preprocess_params['''prompt'''] = prompt
        if generate_kwargs is not None:
            forward_kwargs['''generate_kwargs'''] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['''generate_kwargs'''] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            forward_kwargs['''generate_kwargs''']['''max_new_tokens'''] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images , **kwargs ) -> List[Any]:
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ) -> Dict:
        """simple docstring"""
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['''input_ids'''] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ) -> Dict:
        """simple docstring"""
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''] , list )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            model_inputs['''input_ids'''] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ) -> Optional[int]:
        """simple docstring"""
        records = []
        for output_ids in model_outputs:
            record = {
                '''generated_text''': self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
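# Hedged usage sketch (commented out): exercising this pipeline through the
# high-level factory under the upstream "image-to-text" task name. The checkpoint
# and image URL are illustrative assumptions.
#
# from transformers import pipeline
#
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# print(captioner("https://huggingface.co/datasets/mishig/sample_images/resolve/main/coco.jpg"))
# # -> [{'generated_text': '...'}]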
| 694 |
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_23_12
    sock.connect((host, port) )
    sock.send(B'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(10_24 )
            if not data:
                break
            out_file.write(data )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
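# Hedged companion sketch (commented out; an assumption, not part of the original
# script): a minimal server the client above could talk to, streaming a file over
# the same port. The file path is illustrative.
#
# import socket
#
# def serve(file_path: str = "file_to_send.bin") -> None:
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind((socket.gethostname(), 1_23_12))
#     server.listen(1)
#     conn, _addr = server.accept()
#     print(conn.recv(10_24))  # the client's greeting
#     with open(file_path, "rb") as in_file:
#         while chunk := in_file.read(10_24):
#             conn.send(chunk)
#     conn.close()
#     server.close()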
| 694 | 1 |
'''simple docstring'''
def reverse_long_words( sentence: str ):
    return " ".join(
        ''''''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 694 |
'''simple docstring'''
from __future__ import annotations
def simple_interest( principal: float , daily_interest_rate: float , days_between_payments: float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def compound_interest( principal: float , nominal_annual_interest_rate_percentage: float , number_of_compounding_periods: float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest( principal: float , nominal_annual_percentage_rate: float , number_of_years: float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
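    # Worked examples (figures are illustrative): each function returns interest
    # accrued, not the total balance.
    print(simple_interest(10_000, 0.06, 3))    # 10_000 * 0.06 * 3 = 1800.0
    print(compound_interest(10_000, 0.05, 3))  # 10_000 * (1.05 ** 3 - 1) = 1576.25
    print(apr_interest(10_000, 0.05, 1))       # daily compounding of a 5% APR over one year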
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage: float , current: float , power: float ):
    result = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
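    # Illustrative checks: exactly one of the three arguments is zero and gets solved for.
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
    print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)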
| 694 |
'''simple docstring'''
from math import isqrt
def is_prime( number: int ):
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
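# The candidates generated below are exactly the differences of consecutive cubes,
# (n + 1) ** 3 - n ** 3 = 3n^2 + 3n + 1 (7, 19, 37, 61, ...): successive
# candidates grow by 6 * cube_index, which is the loop increment used here.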
def solution( max_prime: int = 10**6 ):
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1_0_2_4,
    "moussaKam/barthez": 1_0_2_4,
    "moussaKam/barthez-orangesum-title": 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ) -> Optional[Any]:
        """simple docstring"""
        return len(self.sp_model )
    def get_vocab( self ) -> List[str]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> Union[str, Any]:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> int:
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> List[Any]:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ) -> Optional[int]:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ) -> str:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
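# Hedged usage sketch (commented out; requires `sentencepiece`): loading one of
# the published checkpoints listed above. The sample sentence is an illustrative
# assumption.
#
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("moussaKam/barthez")
# print(tokenizer.tokenize("Paris est la capitale de la France."))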
| 694 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric(\"f1\")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {'f1': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric(\"f1\")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results['f1'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric(\"f1\")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results['f1'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n        >>> print(round(results['f1'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n        >>> print(round(results['f1'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n        >>> print(round(results['f1'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n        and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n        and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n        Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ) -> List[Any]:
        """simple docstring"""
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
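# Hedged usage sketch (commented out): computing the metric through the legacy
# `datasets.load_metric` entry point this module is written for; the values
# reproduce the first example in the docstring above.
#
# import datasets
#
# f1_metric = datasets.load_metric("f1")
# results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
# print(results)  # {'f1': 0.5}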
| 694 | 1 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 5_0 # max width of layer names
qname_width = 7_0 # max width of quantizer names
def add_arguments( parser ):
    group = parser.add_argument_group('''quant_trainer arguments''' )
    group.add_argument('''--wprec''' , type=int , default=8 , help='''weight precision''' )
    group.add_argument('''--aprec''' , type=int , default=8 , help='''activation precision''' )
    group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
    group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
    group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
    group.add_argument('''--quant-disable-keyword''' , type=str , nargs='''+''' , help='''disable quantizers by keyword''' )
    group.add_argument('''--quant-disable-layer-module''' , type=str , help='''disable quantizers by keyword under layer.''' )
    group.add_argument('''--quant-enable-layer-module''' , type=str , help='''enable quantizers by keyword under layer''' )
    group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
    group.add_argument('''--percentile''' , default=None , type=float , help='''percentile for PercentileCalibrator''' )
    group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
    group.add_argument('''--clip-gelu''' , metavar='''N''' , type=float , help='''clip gelu output maximum value to N''' )
    group.add_argument(
        '''--recalibrate-weights''' , action='''store_true''' , help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ) , )
def set_default_quantizers( args ):
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model( model , args , calib=False , eval=False ):
    logger.info('''Configuring Model for Quantization''' )
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['''embeddings'''] , which='''weight''' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''''''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model )
def enable_calibration( model ):
    logger.info('''Enabling Calibration''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'''{name:80}: {module}''' )
def finish_calibration( model , args ):
    logger.info('''Loading calibrated amax''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('''percentile''' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def fuse_qkv( model , args ):
    def fusea( qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '''_amax''' ):
                print('''          WARNING: NO AMAX BUFFER''' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f'''          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
    for name, mod in model.named_modules():
        if name.endswith('''.attention.self''' ):
            logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu( model , maxval ):
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax( model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights( model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def _A ( snake_case__ : Optional[int] , snake_case__ : int=25 , snake_case__ : Optional[int]=1_80 , snake_case__ : str=None ):
if ignore is None:
snake_case__ : List[str] = []
elif not isinstance(snake_case__ , snake_case__ ):
snake_case__ : str = [ignore]
snake_case__ : Union[str, Any] = 0
for name, mod in model.named_modules():
if not hasattr(snake_case__ , '''weight''' ):
continue
snake_case__ : str = max(snake_case__ , len(snake_case__ ) )
for name, mod in model.named_modules():
snake_case__ : Dict = getattr(snake_case__ , '''_input_quantizer''' , snake_case__ )
snake_case__ : int = getattr(snake_case__ , '''_weight_quantizer''' , snake_case__ )
if not hasattr(snake_case__ , '''weight''' ):
continue
if type(snake_case__ ) in ignore:
continue
if [True for s in ignore if type(snake_case__ ) is str and s in name]:
continue
snake_case__ : Union[str, Any] = f'''Act:{input_q.extra_repr()}'''
snake_case__ : List[Any] = f'''Wgt:{weight_q.extra_repr()}'''
snake_case__ : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(snake_case__ ) <= line_width:
logger.info(snake_case__ )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{' ':{name_width}} {wgt_str}''' )
def _A ( snake_case__ : int ):
snake_case__ : str = 0
for name, mod in model.named_modules():
if isinstance(snake_case__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def _A ( snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
snake_case__ : int = getattr(snake_case__ , snake_case__ , snake_case__ )
if quantizer_mod is not None:
assert hasattr(snake_case__ , snake_case__ )
setattr(snake_case__ , snake_case__ , snake_case__ )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def _A ( snake_case__ : Any , snake_case__ : Any , snake_case__ : int="both" , **snake_case__ : List[str] ):
snake_case__ : str = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(snake_case__ , snake_case__ , '''_input_quantizer''' , snake_case__ , snake_case__ )
if which in ["weight", "both"]:
set_quantizer(snake_case__ , snake_case__ , '''_weight_quantizer''' , snake_case__ , snake_case__ )
logger.info(snake_case__ )
def _A ( snake_case__ : List[str] , snake_case__ : Any , **snake_case__ : Any ):
for name, mod in model.named_modules():
if hasattr(snake_case__ , '''_input_quantizer''' ) or hasattr(snake_case__ , '''_weight_quantizer''' ):
for n in names:
if re.search(snake_case__ , snake_case__ ):
set_quantizers(snake_case__ , snake_case__ , **snake_case__ )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(snake_case__ , snake_case__ ):
snake_case__ : Any = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(snake_case__ , snake_case__ , snake_case__ )
logger.info(snake_case__ )
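# A minimal calibration-flow sketch for the helpers above. Every definition in this
# file is renamed to `_A`, so `enable_calibration` and `finish_calibration` are the
# assumed upstream names, and `model`, `calib_loader`, and `args` are placeholders.
#
# enable_calibration(model)            # quantizers collect ranges instead of quantizing
# for batch in calib_loader:           # a handful of forward passes is enough
#     model(**batch)
# finish_calibration(model, args)      # load amax values and re-enable quantization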
| 694 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 42
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
snake_case__ : str = timestep
if not torch.is_tensor(lowerCamelCase ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[Any] = timesteps[None].to(sample.device )
snake_case__ : Any = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
snake_case__ : Tuple = self.time_mlp(lowerCamelCase )
else:
snake_case__ : Union[str, Any] = timestep_embed[..., None]
snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case__ : List[Any] = ()
for downsample_block in self.down_blocks:
snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case__ : str = down_block_res_samples[-1:]
snake_case__ : int = down_block_res_samples[:-1]
snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase )
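# A minimal forward-pass sketch, assuming the public diffusers UNet1DModel API that
# the obfuscated class above implements; all values below are illustrative.
#
# import torch
# from diffusers import UNet1DModel
#
# unet = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
# sample = torch.randn(1, 2, 65536)            # (batch, channels, length)
# timestep = torch.tensor([10])
# prediction = unet(sample, timestep).sample   # same shape as the input sample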
| 694 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Dict = 1
snake_case__ : str = 3
snake_case__ : Any = (32, 32)
snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase )
return image
@property
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : int = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def lowercase__ ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Union[str, Any] = self.dummy_cond_unet_upscale
snake_case__ : Union[str, Any] = DDPMScheduler()
snake_case__ : Any = DDIMScheduler(prediction_type='''v_prediction''' )
snake_case__ : Dict = self.dummy_vae
snake_case__ : Tuple = self.dummy_text_encoder
snake_case__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : int = Image.fromarray(np.uinta(lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components
snake_case__ : str = StableDiffusionUpscalePipeline(
unet=lowerCamelCase , low_res_scheduler=lowerCamelCase , scheduler=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , max_noise_level=350 , )
snake_case__ : Tuple = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
snake_case__ : int = '''A painting of a squirrel eating a burger'''
snake_case__ : List[str] = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
snake_case__ : Optional[int] = sd_pipe(
[prompt] , image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
snake_case__ : Any = output.images
snake_case__ : Dict = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
snake_case__ : List[Any] = sd_pipe(
[prompt] , image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCamelCase , )[0]
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
snake_case__ : Tuple = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case__ : str = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[Any] = self.dummy_cond_unet_upscale
snake_case__ : Optional[Any] = DDPMScheduler()
snake_case__ : int = DDIMScheduler(prediction_type='''v_prediction''' )
snake_case__ : List[str] = self.dummy_vae
snake_case__ : List[Any] = self.dummy_text_encoder
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : str = Image.fromarray(np.uinta(lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components
snake_case__ : List[str] = StableDiffusionUpscalePipeline(
unet=lowerCamelCase , low_res_scheduler=lowerCamelCase , scheduler=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , max_noise_level=350 , )
snake_case__ : Dict = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
snake_case__ : int = '''A painting of a squirrel eating a burger'''
snake_case__ : Optional[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
snake_case__ : int = output.images
assert image.shape[0] == 2
snake_case__ : Dict = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
snake_case__ : List[Any] = sd_pipe(
[prompt] , image=lowerCamelCase , generator=lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
snake_case__ : Tuple = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = self.dummy_cond_unet_upscale
snake_case__ : Any = DDPMScheduler()
snake_case__ : int = DDIMScheduler(prediction_type='''v_prediction''' )
snake_case__ : Tuple = self.dummy_vae
snake_case__ : str = self.dummy_text_encoder
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Tuple = Image.fromarray(np.uinta(lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
snake_case__ : Dict = unet.half()
snake_case__ : Optional[Any] = text_encoder.half()
        # assemble the upscale pipeline from the dummy components
snake_case__ : str = StableDiffusionUpscalePipeline(
unet=lowerCamelCase , low_res_scheduler=lowerCamelCase , scheduler=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , max_noise_level=350 , )
snake_case__ : Tuple = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
snake_case__ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
snake_case__ : Optional[int] = torch.manual_seed(0 )
snake_case__ : Union[str, Any] = sd_pipe(
[prompt] , image=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=2 , output_type='''np''' , ).images
snake_case__ : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
snake_case__ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
snake_case__ : Dict = '''stabilityai/stable-diffusion-x4-upscaler'''
snake_case__ : Any = StableDiffusionUpscalePipeline.from_pretrained(lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
snake_case__ : str = '''a cat sitting on a park bench'''
snake_case__ : Tuple = torch.manual_seed(0 )
snake_case__ : str = pipe(
prompt=lowerCamelCase , image=lowerCamelCase , generator=lowerCamelCase , output_type='''np''' , )
snake_case__ : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
snake_case__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
snake_case__ : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
snake_case__ : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Optional[int] = '''a cat sitting on a park bench'''
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Dict = pipe(
prompt=lowerCamelCase , image=lowerCamelCase , generator=lowerCamelCase , output_type='''np''' , )
snake_case__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
snake_case__ : Optional[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
snake_case__ : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : int = '''a cat sitting on a park bench'''
snake_case__ : Optional[int] = torch.manual_seed(0 )
snake_case__ : str = pipe(
prompt=lowerCamelCase , image=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=5 , output_type='''np''' , )
snake_case__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
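# A minimal usage sketch distilled from the slow tests above; it assumes a CUDA GPU
# and downloads the stabilityai/stable-diffusion-x4-upscaler checkpoint.
#
# import torch
# from diffusers import StableDiffusionUpscalePipeline
# from diffusers.utils import load_image
#
# pipe = StableDiffusionUpscalePipeline.from_pretrained(
#     "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
# ).to("cuda")
# low_res = load_image("https://.../low_res_cat.png")   # placeholder URL, see the tests above
# upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]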
| 694 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _A ( snake_case__ : str , snake_case__ : str ):
snake_case__ : Tuple = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
snake_case__ : List[str] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
snake_case__ : str = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
snake_case__ : Any = os.path.join(get_home_dir() , '''models''' )
snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
snake_case__ : Optional[int] = nlp.model.BERTModel(
snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
snake_case__ : Any = original_bort._collect_params_with_prefix()
# Build our config 🤗
snake_case__ : Union[str, Any] = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(snake_case__ ),
}
snake_case__ : Dict = BertConfig.from_dict(snake_case__ )
snake_case__ : Dict = BertForMaskedLM(snake_case__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNet arrays to PyTorch parameters
def to_torch(snake_case__ : str ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ):
snake_case__ : Union[str, Any] = hf_param.shape
snake_case__ : Any = to_torch(params[gluon_param] )
snake_case__ : Dict = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
snake_case__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
snake_case__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
snake_case__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
snake_case__ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
snake_case__ : str = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
snake_case__ : BertSelfAttention = layer.attention.self
snake_case__ : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
snake_case__ : Dict = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
snake_case__ : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
snake_case__ : int = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
snake_case__ : List[Any] = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
snake_case__ : List[Any] = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
snake_case__ : BertSelfOutput = layer.attention.output
snake_case__ : Optional[Any] = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
snake_case__ : List[str] = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
snake_case__ : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
snake_case__ : Any = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
snake_case__ : BertIntermediate = layer.intermediate
snake_case__ : int = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
snake_case__ : Optional[int] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
snake_case__ : BertOutput = layer.output
snake_case__ : Any = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
snake_case__ : Tuple = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
snake_case__ : Tuple = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
snake_case__ : Union[str, Any] = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' )
snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids''']
# Get gluon output
snake_case__ : List[str] = mx.nd.array([input_ids] )
snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(snake_case__ )
snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ )
hf_bort_model.eval()
snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' )
snake_case__ : str = hf_bort_model(**snake_case__ )[0]
snake_case__ : str = output_gluon[0].asnumpy()
snake_case__ : str = output_hf[0].detach().numpy()
snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item()
snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )
if success:
        print('''✔️ Both models output the same tensors''' )
else:
        print('''❌ Both models do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
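# A hypothetical command line for the converter above; the script filename is an
# assumption, while the two flags come straight from the argument parser it defines.
#
# python convert_bort_checkpoint_to_pytorch.py \
#     --bort_checkpoint_path /path/to/bort.params \
#     --pytorch_dump_folder_path ./bort-pytorch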
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : int = 4_00_00_00 ):
snake_case__ : int = []
snake_case__ ,snake_case__ : Union[str, Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(snake_case__ )
snake_case__ ,snake_case__ : Any = b, a + b
return sum(snake_case__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
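# Hand-checked example for the sum above: the Fibonacci numbers not exceeding 10 are
# 1, 1, 2, 3, 5, 8; the even ones are 2 and 8, so the expected result for n = 10 is 10.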
| 694 |
'''simple docstring'''
def _A ( snake_case__ : int = 4_00_00_00 ):
snake_case__ : int = []
snake_case__ ,snake_case__ : Union[str, Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(snake_case__ )
snake_case__ ,snake_case__ : Any = b, a + b
return sum(snake_case__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : List[str] = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = ["MobileNetV2FeatureExtractor"]
_lowerCAmelCase : Optional[int] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
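# A small sketch of what the lazy module above enables: nothing heavy is imported
# until an attribute is first touched. MobileNetV2Config and MobileNetV2Model are
# the public names behind the obfuscated identifiers in this file.
#
# from transformers import MobileNetV2Config, MobileNetV2Model
#
# config = MobileNetV2Config()       # resolving the attribute triggers the real import
# model = MobileNetV2Model(config)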
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : int = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
_lowerCAmelCase : Optional[int] = {
"google/pegasus-xsum": 5_1_2,
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PegasusTokenizer
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is'''
f''' {type(lowerCamelCase )}''' )
snake_case__ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case__ : List[Any] = additional_special_tokens_extended
else:
snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Union[str, Any] = vocab_file
snake_case__ : List[Any] = False if not self.vocab_file else True
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : int = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
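# A minimal usage sketch against the checkpoint referenced above; PegasusTokenizerFast
# is the public name of the obfuscated class in this file.
#
# from transformers import PegasusTokenizerFast
#
# tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
# ids = tokenizer("A beautiful palace in Munich!").input_ids
# ids[-1] == tokenizer.eos_token_id   # build_inputs_with_special_tokens appends </s>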
| 694 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowerCamelCase ) , 'Tatoeba directory does not exist.' )
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=lowerCamelCase )
@slow
def lowercase__ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ ,snake_case__ : Union[str, Any] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=lowerCamelCase )
assert mmeta["long_pair"] == "heb-eng"
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = 1.0 if scale is None else scale
snake_case__ : Dict = 0.0 if loc is None else loc
super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] )
@property
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
return self.variance.sqrt()
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = args_dim
snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] )
snake_case__ : Optional[int] = domain_map
def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]:
"""simple docstring"""
snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj]
return self.domain_map(*lowerCamelCase )
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Tuple = function
def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.function(lowerCamelCase , *lowerCamelCase )
class snake_case :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
def __init__( self , lowerCamelCase = 1 ) -> None:
"""simple docstring"""
snake_case__ : Optional[Any] = dim
snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*lowerCamelCase )
else:
return Independent(self.distribution_class(*lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution:
"""simple docstring"""
snake_case__ : List[Any] = self._base_distribution(lowerCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim )
@property
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return len(self.event_shape )
@property
def lowercase__ ( self ) -> float:
"""simple docstring"""
return 0.0
def lowercase__ ( self , lowerCamelCase ) -> nn.Module:
"""simple docstring"""
return ParameterProjection(
in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowercase__ ( self , *lowerCamelCase ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def lowercase__ ( lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
_lowerCAmelCase = StudentT
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"loc": 1, "scale": 1}
_lowerCAmelCase = Normal
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"total_count": 1, "logits": 1}
_lowerCAmelCase = NegativeBinomial
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowercase__ ( self , lowerCamelCase ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : str = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
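# A minimal end-to-end sketch of the projection-plus-distribution flow above,
# assuming the public transformers.time_series_utils names; StudentTOutput and its
# methods are the upstream equivalents of the obfuscated definitions in this file.
#
# import torch
# from transformers.time_series_utils import StudentTOutput
#
# output_head = StudentTOutput(dim=1)
# projection = output_head.get_parameter_projection(in_features=32)
# df, loc, scale = projection(torch.randn(8, 32))   # one tensor per entry of args_dim
# distr = output_head.distribution((df, loc, scale))
# sample = distr.sample()                           # shape (8,) when dim == 1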
| 694 | 1 |
'''simple docstring'''
from math import factorial
def _A ( snake_case__ : int = 20 ):
    snake_case__ : int = 2 * n  # the middle entry of Pascal's triangle row 2n (an odd-length row) gives the answer for n = 1, 2, 3, ...
snake_case__ : Union[str, Any] = n // 2
return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
_lowerCAmelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 |
'''simple docstring'''
from math import factorial
def _A ( snake_case__ : int = 20 ):
    snake_case__ : int = 2 * n  # the middle entry of Pascal's triangle row 2n (an odd-length row) gives the answer for n = 1, 2, 3, ...
snake_case__ : Union[str, Any] = n // 2
return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
_lowerCAmelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : int , snake_case__ : int ):
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
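# A standalone restatement of the XOR sign trick above, with placeholder names:
#
# def different_signs(a: int, b: int) -> bool:
#     return (a ^ b) < 0    # the sign bit of a ^ b is set iff a and b differ in sign
#
# different_signs(-1, 5)    # True
# different_signs(2, 3)     # False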
| 694 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (EulerDiscreteScheduler,)
_lowerCAmelCase = 1_0
def lowercase__ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase )
return config
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Any = self.get_scheduler_config()
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Any = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : int = model(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : Optional[int] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Union[str, Any] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Tuple = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : int = output.prev_sample
snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Dict = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Optional[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
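# A minimal denoising-loop sketch mirroring the tests above, using the public
# EulerDiscreteScheduler API; the zero "noise prediction" stands in for a real UNet.
#
# import torch
# from diffusers import EulerDiscreteScheduler
#
# scheduler = EulerDiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02)
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
# generator = torch.manual_seed(0)
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = torch.zeros_like(model_input)    # placeholder for model(model_input, t)
#     sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample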
| 694 | 1 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _A ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(snake_case__ ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def _A ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def _A ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(snake_case__ ):
http_head('''https://huggingface.co''' )
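# Note on the third case: OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 mirrors
# exporting HF_DATASETS_OFFLINE=1, the real `datasets` environment switch that makes
# helpers such as http_head fail fast instead of attempting a network connection.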
| 694 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = ['pixel_values']
def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : int = do_resize
snake_case__ : Dict = do_rescale
snake_case__ : Any = size_divisor
snake_case__ : str = resample
super().__init__(**lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
snake_case__ : Any = height // size_divisor * size_divisor
snake_case__ : Union[str, Any] = width // size_divisor * size_divisor
snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
return image
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature:
"""simple docstring"""
snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor
snake_case__ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images]
if do_resize:
snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images]
snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
snake_case__ : str = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
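# Hedged sketch (comments only, not part of the original file): the resize step
# above floors each spatial dimension to the nearest multiple of `size_divisor`,
# so with size_divisor=32 an input of height 522 and width 640 becomes 512 x 640:
#
#   new_h = 522 // 32 * 32  # 512
#   new_w = 640 // 32 * 32  # 640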
| 694 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , ) -> Any:
"""simple docstring"""
snake_case__ : str = size if size is not None else {'''height''': 18, '''width''': 18}
snake_case__ : Tuple = parent
snake_case__ : int = batch_size
snake_case__ : Optional[Any] = num_channels
snake_case__ : Tuple = image_size
snake_case__ : int = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : Any = size
snake_case__ : Optional[Any] = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : int = image_std
def lowercase__ ( self ) -> str:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = DPTImageProcessor if is_vision_available() else None
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = DPTImageProcessingTester(self )
@property
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''size''' ) )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
snake_case__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
snake_case__ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case__ : Dict = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case__ : str = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case__ : Tuple = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 694 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
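# Illustration (comments only, values mirror the parametrization above): for
# repo_id="org-name/dataset-name", path="filename with blanks.csv" and
# revision=None, the expected URL is
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# since urllib.parse.quote percent-encodes the path component.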
| 694 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ) -> str:
"""simple docstring"""
snake_case__ : Union[str, Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = encoder_seq_length
snake_case__ : List[Any] = decoder_seq_length
# For common tests
snake_case__ : Dict = self.decoder_seq_length
snake_case__ : Dict = is_training
snake_case__ : Optional[Any] = use_attention_mask
snake_case__ : Union[str, Any] = use_labels
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : Any = d_ff
snake_case__ : Any = relative_attention_num_buckets
snake_case__ : int = dropout_rate
snake_case__ : Union[str, Any] = initializer_factor
snake_case__ : Optional[int] = eos_token_id
snake_case__ : List[Any] = pad_token_id
snake_case__ : Union[str, Any] = decoder_start_token_id
snake_case__ : List[Any] = None
snake_case__ : Any = decoder_layers
def lowercase__ ( self ) -> int:
"""simple docstring"""
return TaConfig.from_pretrained('''google/umt5-base''' )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> Tuple:
"""simple docstring"""
if attention_mask is None:
snake_case__ : List[str] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : Tuple = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
snake_case__ : Optional[Any] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
snake_case__ : str = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case__ : Any = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : int = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Tuple = self.get_config()
snake_case__ : List[str] = config.num_attention_heads
snake_case__ : Union[str, Any] = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def lowercase__ ( self ) -> int:
"""simple docstring"""
        snake_case__, snake_case__ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> List[str]:
"""simple docstring"""
snake_case__ : Tuple = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : int = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
snake_case__ : Optional[int] = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
snake_case__ : Optional[int] = result.last_hidden_state
snake_case__ : int = result.past_key_values
snake_case__ : List[str] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> List[str]:
"""simple docstring"""
snake_case__ : List[str] = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
snake_case__ : str = model(lowerCamelCase , use_cache=lowerCamelCase )
snake_case__ : int = model(lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
        snake_case__, snake_case__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case__ : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
snake_case__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : Optional[int] = model(lowerCamelCase )['''last_hidden_state''']
snake_case__ : Union[str, Any] = model(lowerCamelCase , past_key_values=lowerCamelCase )['''last_hidden_state''']
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : str = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Dict = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
snake_case__ : List[Any] = model(**lowerCamelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_lowerCAmelCase = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_lowerCAmelCase = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_lowerCAmelCase = [0.8, 0.9]
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Any = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : int = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=lowerCamelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Optional[Any] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = config_and_inputs[0]
snake_case__ : int = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
snake_case__ : List[Any] = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
snake_case__ : int = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
snake_case__ : Optional[Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
snake_case__ : List[str] = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
snake_case__ : Any = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Any = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=lowerCamelCase ).to(lowerCamelCase )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=lowerCamelCase , legacy=lowerCamelCase )
snake_case__ : Optional[int] = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
snake_case__ : List[Any] = tokenizer(lowerCamelCase , return_tensors='''pt''' , padding=lowerCamelCase ).input_ids
# fmt: off
snake_case__ : Union[str, Any] = torch.tensor(
[
            [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
snake_case__ : Any = model.generate(input_ids.to(lowerCamelCase ) )
snake_case__ : int = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
snake_case__ : Dict = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 694 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
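    # Minimal usage sketch (comments only; the helper is obfuscated to `_A` here and
    # solves P = V * I for whichever of (voltage, current, power) is zero):
    #
    #   _A(0, 2, 4)  # -> result(name='voltage', value=2.0)
    #   _A(2, 0, 4)  # -> result(name='current', value=2.0)
    #   _A(2, 2, 0)  # -> result(name='power', value=4.0)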
| 694 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'dpt'
def __init__( self , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=384 , lowerCamelCase=16 , lowerCamelCase=3 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=[2, 5, 8, 11] , lowerCamelCase="project" , lowerCamelCase=[4, 2, 1, 0.5] , lowerCamelCase=[96, 192, 384, 768] , lowerCamelCase=256 , lowerCamelCase=-1 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=255 , lowerCamelCase=0.1 , lowerCamelCase=[1, 1024, 24, 24] , lowerCamelCase=[0, 1] , lowerCamelCase=None , **lowerCamelCase , ) -> Tuple:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Optional[int] = hidden_size
snake_case__ : Tuple = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case__ : List[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case__ : Any = BitConfig(**lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case__ : Optional[int] = BitConfig(**lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : List[Any] = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case__ : Optional[int] = backbone_featmap_shape
snake_case__ : int = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case__ : List[Any] = None
snake_case__ : Dict = None
snake_case__ : Dict = []
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Tuple = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : Any = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : List[Any] = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : Optional[Any] = image_size
snake_case__ : int = patch_size
snake_case__ : Any = num_channels
snake_case__ : Optional[int] = qkv_bias
snake_case__ : Dict = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case__ : str = readout_type
snake_case__ : List[Any] = reassemble_factors
snake_case__ : Dict = neck_hidden_sizes
snake_case__ : Any = fusion_hidden_size
snake_case__ : str = head_in_index
snake_case__ : Union[str, Any] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case__ : str = use_auxiliary_head
snake_case__ : Union[str, Any] = auxiliary_loss_weight
snake_case__ : List[str] = semantic_loss_ignore_index
snake_case__ : Dict = semantic_classifier_dropout
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case__ : List[str] = self.backbone_config.to_dict()
snake_case__ : int = self.__class__.model_type
return output
| 694 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , snake_case__ )
def _A ( snake_case__ : List[str] , snake_case__ : Dict ):
snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' )
with open(snake_case__ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case__ )
snake_case__ : int = get_imports(snake_case__ )
assert parsed_imports == ["os"]
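# Worked note (comments only): every fixture above nests or guards its `bar`/`baz`
# imports inside functions or try/except blocks, so get_imports is expected to
# report only the unconditional top-level dependency, ["os"], for each case.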
| 694 | 1 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'markuplm'
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Optional[int] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : List[str] = initializer_range
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Optional[Any] = position_embedding_type
snake_case__ : Dict = use_cache
snake_case__ : int = classifier_dropout
# additional properties
snake_case__ : Union[str, Any] = max_depth
snake_case__ : Dict = max_xpath_tag_unit_embeddings
snake_case__ : Any = max_xpath_subs_unit_embeddings
snake_case__ : int = tag_pad_id
snake_case__ : Tuple = subs_pad_id
snake_case__ : Dict = xpath_unit_hidden_size
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : int ):
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
snake_case__ : Any = str(snake_case__ )
snake_case__ : Optional[Any] = ''''''.join(sorted(snake_case__ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _A ( snake_case__ : float = 99 ):
if not 0 < percent < 1_00:
        raise ValueError('''solution() only accepts values strictly between 0 and 100''' )
snake_case__ : int = 0
snake_case__ : Any = 1
while True:
if check_bouncy(snake_case__ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(9_9)}''')
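# Worked example (comments only): 1234 is increasing and 4321 decreasing, so
# neither is bouncy, while 155349 matches neither its ascending (134559) nor its
# descending (955431) digit sort and is therefore bouncy. Per Project Euler 112,
# the bouncy proportion first reaches 50% at n = 538; the default call above asks
# for 99%.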
| 694 |
'''simple docstring'''
def _A ( snake_case__ : float ):
return 10 - x * x
def _A ( snake_case__ : float , snake_case__ : float ):
    # Bolzano's theorem (intermediate value theorem): a sign change on [a, b] guarantees a root between them
if equation(snake_case__ ) * equation(snake_case__ ) >= 0:
raise ValueError('''Wrong space!''' )
snake_case__ : List[str] = a
while (b - a) >= 0.01:
# Find middle point
snake_case__ : Optional[int] = (a + b) / 2
# Check if middle point is root
if equation(snake_case__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case__ ) * equation(snake_case__ ) < 0:
snake_case__ : Dict = c
else:
snake_case__ : List[str] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
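# Worked example (comments only): f(x) = 10 - x*x changes sign on both intervals
# above (f(-2) = 6, f(5) = -15; f(0) = 10, f(6) = -26), so bisection halves the
# bracket until it is narrower than 0.01 and both calls converge to the positive
# root sqrt(10) ~ 3.162.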
| 694 | 1 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_lowerCAmelCase : Optional[Any] = TypeVar("KEY")
_lowerCAmelCase : Optional[int] = TypeVar("VAL")
@dataclass(frozen=__lowerCamelCase , slots=__lowerCamelCase )
class snake_case ( Generic[KEY, VAL] ):
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
class snake_case ( _Item ):
"""simple docstring"""
def __init__( self ) -> None:
"""simple docstring"""
super().__init__(lowerCamelCase , lowerCamelCase )
def __bool__( self ) -> bool:
"""simple docstring"""
return False
_lowerCAmelCase : Any = _DeletedItem()
class snake_case ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self , lowerCamelCase = 8 , lowerCamelCase = 0.75 ) -> None:
"""simple docstring"""
snake_case__ : int = initial_block_size
snake_case__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case__ : Any = capacity_factor
snake_case__ : Union[str, Any] = 0
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
return hash(lowerCamelCase ) % len(self._buckets )
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> bool:
"""simple docstring"""
snake_case__ : Dict = self._buckets[ind]
if not stored:
snake_case__ : Optional[Any] = _Item(lowerCamelCase , lowerCamelCase )
self._len += 1
return True
elif stored.key == key:
snake_case__ : Any = _Item(lowerCamelCase , lowerCamelCase )
return True
else:
return False
def lowercase__ ( self ) -> bool:
"""simple docstring"""
snake_case__ : List[str] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase )
def lowercase__ ( self ) -> bool:
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case__ : Dict = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowercase__ ( self , lowerCamelCase ) -> None:
"""simple docstring"""
snake_case__ : int = self._buckets
snake_case__ : Optional[int] = [None] * new_size
snake_case__ : str = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowercase__ ( self ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def lowercase__ ( self ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def lowercase__ ( self , lowerCamelCase ) -> Iterator[int]:
"""simple docstring"""
snake_case__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
snake_case__ : List[str] = self._get_next_ind(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(lowerCamelCase ):
if self._try_set(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
break
def __setitem__( self , lowerCamelCase , lowerCamelCase ) -> None:
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase , lowerCamelCase )
def __delitem__( self , lowerCamelCase ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(lowerCamelCase ):
snake_case__ : List[Any] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase )
if item is _deleted:
continue
if item.key == key:
snake_case__ : Optional[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , lowerCamelCase ) -> VAL:
"""simple docstring"""
for ind in self._iterate_buckets(lowerCamelCase ):
snake_case__ : Optional[int] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase )
def __len__( self ) -> int:
"""simple docstring"""
return self._len
def __iter__( self ) -> Iterator[KEY]:
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
"""simple docstring"""
snake_case__ : str = ''' ,'''.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
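# Minimal usage sketch (comments only; the class above is obfuscated to
# `snake_case`, standing in for a HashMap[KEY, VAL]):
#
#   hm = snake_case[str, int]()
#   hm["a"] = 1      # __setitem__; _size_up() doubles the buckets at 75% load
#   del hm["a"]      # __delitem__ tombstones the slot with the _deleted sentinel
#   len(hm)          # 0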
| 694 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : list[float] , snake_case__ : list[float] ):
snake_case__ : Dict = sorted(numsa + numsa )
    snake_case__, snake_case__ = divmod(len(snake_case__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
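# Worked example (comments only): for [1, 3] and [2, 4] the merged sorted list is
# [1, 2, 3, 4]; its length is even, so the median is (2 + 3) / 2 = 2.5. For [1, 3]
# and [2] the merged list [1, 2, 3] has odd length and the median is 2.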
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
_lowerCAmelCase : Any = []
def _A ( snake_case__ : list[list[int]] , snake_case__ : int , snake_case__ : int ):
for i in range(len(snake_case__ ) ):
if board[row][i] == 1:
return False
for i in range(len(snake_case__ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(snake_case__ , -1 , -1 ) , range(snake_case__ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(snake_case__ , -1 , -1 ) , range(snake_case__ , len(snake_case__ ) ) ):
if board[i][j] == 1:
return False
return True
def _A ( snake_case__ : list[list[int]] , snake_case__ : int ):
if row >= len(snake_case__ ):
solution.append(snake_case__ )
printboard(snake_case__ )
print()
return True
for i in range(len(snake_case__ ) ):
if is_safe(snake_case__ , snake_case__ , snake_case__ ):
snake_case__ : Optional[Any] = 1
solve(snake_case__ , row + 1 )
snake_case__ : List[str] = 0
return False
def _A ( snake_case__ : list[list[int]] ):
for i in range(len(snake_case__ ) ):
for j in range(len(snake_case__ ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : int = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 694 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : int = 10_00 ):
    snake_case__, snake_case__ = 1, 1
snake_case__ : List[str] = 2
while True:
snake_case__ : Dict = 0
snake_case__ : List[Any] = fa + fa
        snake_case__, snake_case__ = fa, f
index += 1
for _ in str(snake_case__ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
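# Worked example (comments only): with n = 3 the first Fibonacci number with three
# digits is F(12) = 144, so solution(3) returns 12; for the Project Euler 25 input
# n = 1000 the expected index is 4782.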
| 694 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'encoder-decoder'
_lowerCAmelCase = True
def __init__( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case__ : List[str] = kwargs.pop('''encoder''' )
snake_case__ : Any = encoder_config.pop('''model_type''' )
snake_case__ : List[str] = kwargs.pop('''decoder''' )
snake_case__ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : str = True
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case__ : Optional[int] = True
snake_case__ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case__ : List[Any] = self.encoder.to_dict()
snake_case__ : str = self.decoder.to_dict()
snake_case__ : Any = self.__class__.model_type
return output
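# Note (comments only): the classmethod above (from_encoder_decoder_configs in the
# upstream API) force-sets is_decoder=True and add_cross_attention=True on the
# decoder config before bundling, so an off-the-shelf decoder config can be passed
# in unchanged; to_dict() re-serializes both nested configs for round-tripping.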
| 694 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : str = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'canine'
def __init__( self , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=16384 , lowerCamelCase=16 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0xe_000 , lowerCamelCase=0xe_001 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=8 , lowerCamelCase=16384 , lowerCamelCase=128 , **lowerCamelCase , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Dict = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : List[str] = hidden_act
snake_case__ : Dict = hidden_dropout_prob
snake_case__ : Any = attention_probs_dropout_prob
snake_case__ : Any = initializer_range
snake_case__ : str = type_vocab_size
snake_case__ : Any = layer_norm_eps
# Character config:
snake_case__ : Union[str, Any] = downsampling_rate
snake_case__ : Dict = upsampling_kernel_size
snake_case__ : Tuple = num_hash_functions
snake_case__ : List[str] = num_hash_buckets
snake_case__ : List[str] = local_transformer_stride
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
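# Note (comments only): build_inputs_with_special_tokens above follows the
# RoBERTa/BART convention: a single sequence becomes <s> A </s> and a pair
# becomes <s> A </s></s> B </s>, which the special-tokens mask and token-type
# helpers mirror.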
| 694 | 1 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Any ):
if isinstance(snake_case__ , torch.Tensor ):
return image
elif isinstance(snake_case__ , PIL.Image.Image ):
snake_case__ : Optional[int] = [image]
if isinstance(image[0] , PIL.Image.Image ):
snake_case__ : Dict = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
snake_case__ : List[Any] = np.concatenate(snake_case__ , axis=0 )
snake_case__ : int = np.array(snake_case__ ).astype(np.floataa ) / 2_55.0
snake_case__ : Tuple = image.transpose(0 , 3 , 1 , 2 )
snake_case__ : List[str] = 2.0 * image - 1.0
snake_case__ : str = torch.from_numpy(snake_case__ )
elif isinstance(image[0] , torch.Tensor ):
snake_case__ : Dict = torch.cat(snake_case__ , dim=0 )
return image
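# Spherical linear interpolation between two latent vectors:
#   slerp(t, v0, v1) = sin((1 - t) * theta) / sin(theta) * v0 + sin(t * theta) / sin(theta) * v1,
# where theta is the angle between v0 and v1. When the normalized dot product exceeds
# DOT_THRESHOLD the vectors are nearly parallel and a plain linear interpolation is used instead.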
def _A ( snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int=0.99_95 ):
if not isinstance(snake_case__ , np.ndarray ):
snake_case__ : List[str] = True
snake_case__ : Dict = va.device
snake_case__ : int = va.cpu().numpy()
snake_case__ : List[str] = va.cpu().numpy()
snake_case__ : int = np.sum(va * va / (np.linalg.norm(snake_case__ ) * np.linalg.norm(snake_case__ )) )
if np.abs(snake_case__ ) > DOT_THRESHOLD:
snake_case__ : int = (1 - t) * va + t * va
else:
snake_case__ : Union[str, Any] = np.arccos(snake_case__ )
snake_case__ : int = np.sin(snake_case__ )
snake_case__ : Optional[Any] = theta_a * t
snake_case__ : List[Any] = np.sin(snake_case__ )
snake_case__ : Dict = np.sin(theta_a - theta_t ) / sin_theta_a
snake_case__ : Any = sin_theta_t / sin_theta_a
snake_case__ : Any = sa * va + sa * va
if inputs_are_torch:
snake_case__ : Union[str, Any] = torch.from_numpy(snake_case__ ).to(snake_case__ )
return va
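# squared geodesic distance between points on the unit sphere, recovered from the chord length:
# loss = 2 * arcsin(||x - y|| / 2) ** 2 after normalizing both inputs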
def _A ( snake_case__ : str , snake_case__ : Dict ):
snake_case__ : Union[str, Any] = F.normalize(snake_case__ , dim=-1 )
snake_case__ : Any = F.normalize(snake_case__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def _A ( snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
for param in model.parameters():
snake_case__ : Union[str, Any] = value
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCamelCase , text_encoder=lowerCamelCase , clip_model=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , feature_extractor=lowerCamelCase , coca_model=lowerCamelCase , coca_tokenizer=lowerCamelCase , coca_transform=lowerCamelCase , )
snake_case__ : List[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , lowerCamelCase )
else feature_extractor.size['''shortest_edge''']
)
snake_case__ : Optional[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowerCamelCase )
set_requires_grad(self.clip_model , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase = "auto" ) -> Dict:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case__ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
set_requires_grad(self.vae , lowerCamelCase )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.vae , lowerCamelCase )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
set_requires_grad(self.unet , lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
set_requires_grad(self.unet , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Dict = min(int(num_inference_steps * strength ) , lowerCamelCase )
snake_case__ : List[Any] = max(num_inference_steps - init_timestep , 0 )
snake_case__ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) -> List[str]:
"""simple docstring"""
if not isinstance(lowerCamelCase , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(lowerCamelCase )}''' )
snake_case__ : str = image.to(device=lowerCamelCase , dtype=lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : Any = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase )
]
snake_case__ : int = torch.cat(lowerCamelCase , dim=0 )
else:
snake_case__ : List[str] = self.vae.encode(lowerCamelCase ).latent_dist.sample(lowerCamelCase )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case__ : Optional[int] = 0.18_215 * init_latents
snake_case__ : str = init_latents.repeat_interleave(lowerCamelCase , dim=0 )
snake_case__ : List[str] = randn_tensor(init_latents.shape , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase )
# get latents
snake_case__ : Optional[int] = self.scheduler.add_noise(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : List[Any] = init_latents
return latents
def lowercase__ ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.coca_transform(lowerCamelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case__ : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
snake_case__ : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = self.feature_extractor.preprocess(lowerCamelCase )
snake_case__ : int = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case__ : Optional[Any] = self.clip_model.get_image_features(lowerCamelCase )
snake_case__ : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCamelCase )
snake_case__ : Optional[int] = image_embeddings_clip.repeat_interleave(lowerCamelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = latents.detach().requires_grad_()
snake_case__ : List[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
snake_case__ : int = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case__ : str = self.scheduler.alphas_cumprod[timestep]
snake_case__ : str = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case__ : Any = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case__ : Optional[Any] = torch.sqrt(lowerCamelCase )
snake_case__ : List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowerCamelCase ):
snake_case__ : str = self.scheduler.sigmas[index]
snake_case__ : Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case__ : Union[str, Any] = 1 / 0.18_215 * sample
snake_case__ : str = self.vae.decode(lowerCamelCase ).sample
snake_case__ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case__ : Tuple = transforms.Resize(self.feature_extractor_size )(lowerCamelCase )
snake_case__ : Union[str, Any] = self.normalize(lowerCamelCase ).to(latents.dtype )
snake_case__ : Optional[Any] = self.clip_model.get_image_features(lowerCamelCase )
snake_case__ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCamelCase )
snake_case__ : Dict = spherical_dist_loss(lowerCamelCase , lowerCamelCase ).mean() * clip_guidance_scale
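# the negative gradient of the CLIP loss w.r.t. the latents nudges denoising toward the target CLIP embedding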
snake_case__ : Dict = -torch.autograd.grad(lowerCamelCase , lowerCamelCase )[0]
if isinstance(self.scheduler , lowerCamelCase ):
snake_case__ : int = latents.detach() + grads * (sigma**2)
snake_case__ : Union[str, Any] = noise_pred_original
else:
snake_case__ : Union[str, Any] = noise_pred_original - torch.sqrt(lowerCamelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 0.6 , lowerCamelCase = 50 , lowerCamelCase = 7.5 , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = 100 , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , lowerCamelCase = 0.8 , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , ) -> List[str]:
"""simple docstring"""
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(lowerCamelCase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(lowerCamelCase , torch.Generator ) and batch_size > 1:
snake_case__ : str = [generator] + [None] * (batch_size - 1)
snake_case__ : Any = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
snake_case__ : List[str] = [x[0] for x in coca_is_none if x[1]]
snake_case__ : Union[str, Any] = ''', '''.join(lowerCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCamelCase ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
snake_case__ : Dict = self.get_image_description(lowerCamelCase )
if style_prompt is None:
if len(lowerCamelCase ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
snake_case__ : Optional[int] = self.get_image_description(lowerCamelCase )
# get prompt text embeddings for content and style
snake_case__ : List[str] = self.tokenizer(
lowerCamelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCamelCase , return_tensors='''pt''' , )
snake_case__ : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
snake_case__ : Any = self.tokenizer(
lowerCamelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCamelCase , return_tensors='''pt''' , )
snake_case__ : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
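# blend the content and style prompt embeddings on the unit hypersphere via slerp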
snake_case__ : List[Any] = slerp(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# duplicate text embeddings for each generation per prompt
snake_case__ : Tuple = text_embeddings.repeat_interleave(lowerCamelCase , dim=0 )
# set timesteps
snake_case__ : Optional[Any] = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
snake_case__ : Any = {}
if accepts_offset:
snake_case__ : Dict = 1
self.scheduler.set_timesteps(lowerCamelCase , **lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
snake_case__ ,snake_case__ : Optional[int] = self.get_timesteps(lowerCamelCase , lowerCamelCase , self.device )
snake_case__ : Any = timesteps[:1].repeat(lowerCamelCase )
# Preprocess image
snake_case__ : Union[str, Any] = preprocess(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : Dict = self.prepare_latents(
lowerCamelCase , lowerCamelCase , lowerCamelCase , text_embeddings.dtype , self.device , lowerCamelCase )
snake_case__ : str = preprocess(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : List[Any] = self.prepare_latents(
lowerCamelCase , lowerCamelCase , lowerCamelCase , text_embeddings.dtype , self.device , lowerCamelCase )
snake_case__ : List[Any] = slerp(lowerCamelCase , lowerCamelCase , lowerCamelCase )
if clip_guidance_scale > 0:
snake_case__ : Union[str, Any] = self.get_clip_image_embeddings(lowerCamelCase , lowerCamelCase )
snake_case__ : List[Any] = self.get_clip_image_embeddings(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = slerp(
lowerCamelCase , lowerCamelCase , lowerCamelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
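# i.e. noise_pred = eps_uncond + guidance_scale * (eps_text - eps_uncond), applied in the denoising loop below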
snake_case__ : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case__ : List[Any] = content_text_input.input_ids.shape[-1]
snake_case__ : Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=lowerCamelCase , return_tensors='''pt''' )
snake_case__ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
snake_case__ : List[Any] = uncond_embeddings.repeat_interleave(lowerCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case__ : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case__ : int = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
snake_case__ : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
snake_case__ : List[Any] = torch.randn(lowerCamelCase , generator=lowerCamelCase , device='''cpu''' , dtype=lowerCamelCase ).to(
self.device )
else:
snake_case__ : Union[str, Any] = torch.randn(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
snake_case__ : List[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case__ : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case__ : Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case__ : Dict = {}
if accepts_eta:
snake_case__ : Any = eta
# check if the scheduler accepts generator
snake_case__ : Union[str, Any] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
snake_case__ : Dict = generator
with self.progress_bar(total=lowerCamelCase ):
for i, t in enumerate(lowerCamelCase ):
# expand the latents if we are doing classifier free guidance
snake_case__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case__ : List[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
snake_case__ : List[Any] = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
snake_case__ ,snake_case__ : List[str] = noise_pred.chunk(2 )
snake_case__ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
snake_case__ : str = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
snake_case__ ,snake_case__ : Optional[Any] = self.cond_fn(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
snake_case__ : Tuple = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case__ : int = 1 / 0.18_215 * latents
snake_case__ : List[Any] = self.vae.decode(lowerCamelCase ).sample
snake_case__ : Any = (image / 2 + 0.5).clamp(0 , 1 )
snake_case__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case__ : Union[str, Any] = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCamelCase , nsfw_content_detected=lowerCamelCase )
| 694 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowerCAmelCase : Optional[int] = parser.parse_args()
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model"
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase : Optional[Any] = pipe.to(device)
# to channels last
_lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
_lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last)
_lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
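# dummy tensors shaped like the UNet forward inputs (latent sample, timestep, encoder hidden states);
# passing them via sample_input lets ipex.optimize specialize the optimized model for realistic shapes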
_lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4)
_lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9
_lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8)
_lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowerCAmelCase : Tuple = 6_6_6
_lowerCAmelCase : str = torch.Generator(device).manual_seed(seed)
_lowerCAmelCase : Dict = {"generator": generator}
if args.steps is not None:
_lowerCAmelCase : Tuple = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_lowerCAmelCase = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[str] = AudioClassificationPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase )
# test with a raw waveform
snake_case__ : Any = np.zeros((34000,) )
snake_case__ : Dict = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ ,snake_case__ : Any = examples
snake_case__ : Dict = audio_classifier(lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowerCamelCase , [
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
] , )
snake_case__ : Tuple = audio_classifier(lowerCamelCase , top_k=1 )
self.assertEqual(
lowerCamelCase , [
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
] , )
self.run_torchaudio(lowerCamelCase )
@require_torchaudio
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
import datasets
# test with an audio array loaded from a real dataset
snake_case__ : List[str] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
snake_case__ : List[str] = dataset[0]['''audio''']['''array''']
snake_case__ : List[str] = audio_classifier(lowerCamelCase )
self.assertEqual(
lowerCamelCase , [
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
] , )
@require_torch
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : List[Any] = '''anton-l/wav2vec2-random-tiny-classifier'''
snake_case__ : List[Any] = pipeline('''audio-classification''' , model=lowerCamelCase )
snake_case__ : Optional[Any] = np.ones((8000,) )
snake_case__ : List[str] = audio_classifier(lowerCamelCase , top_k=4 )
snake_case__ : str = [
{'''score''': 0.0_842, '''label''': '''no'''},
{'''score''': 0.0_838, '''label''': '''up'''},
{'''score''': 0.0_837, '''label''': '''go'''},
{'''score''': 0.0_834, '''label''': '''right'''},
]
snake_case__ : Any = [
{'''score''': 0.0_845, '''label''': '''stop'''},
{'''score''': 0.0_844, '''label''': '''on'''},
{'''score''': 0.0_841, '''label''': '''right'''},
{'''score''': 0.0_834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
snake_case__ : Any = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
snake_case__ : List[str] = audio_classifier(lowerCamelCase , top_k=4 )
self.assertIn(nested_simplify(lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
import datasets
snake_case__ : List[Any] = '''superb/wav2vec2-base-superb-ks'''
snake_case__ : str = pipeline('''audio-classification''' , model=lowerCamelCase )
snake_case__ : List[Any] = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
snake_case__ : Dict = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
snake_case__ : str = audio_classifier(lowerCamelCase , top_k=4 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
pass
| 694 |
'''simple docstring'''
import socket
def _A ( ):
snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case__ : str = socket.gethostname()
snake_case__ : Union[str, Any] = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
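# read the stream in 1024-byte chunks; recv returns b'' once the sender closes the connection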
while True:
snake_case__ : int = sock.recv(10_24 )
if not data:
break
out_file.write(snake_case__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
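# _LazyModule (registered at the bottom) defers these imports until first attribute access, so optional backends are only loaded when actually used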
_lowerCAmelCase : Tuple = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
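# compound interest earned (not the final balance): principal * ((1 + rate_per_period) ** periods - 1)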
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
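# daily-compounded interest over whole years: per-day rate = APR / 365, periods = years * 365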
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def _A ( snake_case__ : Dict ):
if hor == 1_28:
snake_case__ : List[Any] = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
snake_case__ : List[Any] = (32, 1_28, 2_56)
snake_case__ : Tuple = ('''UpResnetBlock1D''', '''UpResnetBlock1D''')
elif hor == 32:
snake_case__ : str = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
snake_case__ : str = (32, 64, 1_28, 2_56)
snake_case__ : List[Any] = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''')
snake_case__ : Dict = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
snake_case__ : int = model.state_dict()
snake_case__ : Any = {
'''down_block_types''': down_block_types,
'''block_out_channels''': block_out_channels,
'''up_block_types''': up_block_types,
'''layers_per_block''': 1,
'''use_timestep_embedding''': True,
'''out_block_type''': '''OutConv1DBlock''',
'''norm_num_groups''': 8,
'''downsample_each_block''': False,
'''in_channels''': 14,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''sample_size''': 6_55_36,
'''mid_block_type''': '''MidResTemporalBlock1D''',
'''act_fn''': '''mish''',
}
snake_case__ : List[str] = UNetaDModel(**snake_case__ )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
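# relies on both state dicts enumerating their parameters in the same order, so zipping the key lists yields the rename mapping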
snake_case__ : List[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
snake_case__ : Optional[Any] = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , '''w''' ) as f:
json.dump(snake_case__ , snake_case__ )
def _A ( ):
snake_case__ : Tuple = {
'''in_channels''': 14,
'''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''),
'''up_block_types''': (),
'''out_block_type''': '''ValueFunction''',
'''mid_block_type''': '''ValueFunctionMidBlock1D''',
'''block_out_channels''': (32, 64, 1_28, 2_56),
'''layers_per_block''': 1,
'''downsample_each_block''': True,
'''sample_size''': 6_55_36,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''use_timestep_embedding''': True,
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''norm_num_groups''': 8,
'''act_fn''': '''mish''',
}
snake_case__ : Optional[Any] = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
snake_case__ : int = model
snake_case__ : Tuple = UNetaDModel(**snake_case__ )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
snake_case__ : Optional[int] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
snake_case__ : Optional[int] = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f:
json.dump(snake_case__ , snake_case__ )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 694 |
'''simple docstring'''
from math import isqrt
def _A ( snake_case__ : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def _A ( snake_case__ : int = 10**6 ):
snake_case__ : str = 0
snake_case__ : List[str] = 1
snake_case__ : str = 7
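# candidates are differences of consecutive cubes: (k + 1) ** 3 - k ** 3 = 3 * k * k + 3 * k + 1, i.e. 7, 19, 37, ...
# consecutive candidates differ by 6 * (k + 1), which matches the increments in the loop below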
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 1 |
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 6_5_5_2_1
def _A ( snake_case__ : str ):
snake_case__ : Dict = 1
snake_case__ : List[str] = 0
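# Adler-32: a = 1 + sum(bytes) mod 65521, b = running sum of the a values mod 65521; checksum = (b << 16) | a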
for plain_chr in plain_text:
snake_case__ : Tuple = (a + ord(snake_case__ )) % MOD_ADLER
snake_case__ : Union[str, Any] = (b + a) % MOD_ADLER
return (b << 16) | a
| 694 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = fa_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase )
return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
| 694 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : Optional[Any] = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 42
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
snake_case__ : str = timestep
if not torch.is_tensor(lowerCamelCase ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[Any] = timesteps[None].to(sample.device )
snake_case__ : Any = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
snake_case__ : Tuple = self.time_mlp(lowerCamelCase )
else:
snake_case__ : Union[str, Any] = timestep_embed[..., None]
snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case__ : List[Any] = ()
for downsample_block in self.down_blocks:
snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case__ : str = down_block_res_samples[-1:]
snake_case__ : int = down_block_res_samples[:-1]
snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase )
| 694 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
snake_case__ : Any = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Any = '''sshleifer/tiny-gpt2'''
snake_case__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : Any = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Any = '''sgugger/tiny-distilbert-classification'''
snake_case__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , only_pretrain_model=lowerCamelCase , )
snake_case__ : Union[str, Any] = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : str = '''sshleifer/tiny-gpt2'''
snake_case__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Optional[int] = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : List[Any] = '''sshleifer/tiny-gpt2'''
snake_case__ : str = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : Any = TensorFlowBenchmark(lowerCamelCase , [config] )
snake_case__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = '''sshleifer/tiny-gpt2'''
snake_case__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Dict = TensorFlowBenchmark(lowerCamelCase , [config] )
snake_case__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Dict = '''sshleifer/tiny-gpt2'''
snake_case__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Optional[int] = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : List[Any] = '''sshleifer/tiny-gpt2'''
snake_case__ : Dict = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : List[Any] = TensorFlowBenchmark(lowerCamelCase , [config] )
snake_case__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[Any] = '''patrickvonplaten/t5-tiny-random'''
snake_case__ : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : int = TensorFlowBenchmark(lowerCamelCase , configs=[config] )
snake_case__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = '''sshleifer/tiny-gpt2'''
snake_case__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : Optional[int] = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowerCamelCase , save_to_csv=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(lowerCamelCase , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(lowerCamelCase , '''env.csv''' ) , multi_process=lowerCamelCase , )
snake_case__ : List[str] = TensorFlowBenchmark(lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , '''env.csv''' ) ).exists() )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : int = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(lowerCamelCase ):
self.assertTrue(hasattr(lowerCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''current''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase , '''log.txt''' ) , log_print=lowerCamelCase , trace_memory_line_by_line=lowerCamelCase , eager_mode=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : int = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(lowerCamelCase , '''log.txt''' ) ).exists() )
| 694 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _A ( snake_case__ : str , snake_case__ : str ):
snake_case__ : Tuple = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
snake_case__ : List[str] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
snake_case__ : str = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir(), '''models''')
    vocab = _load_vocab(vocab_name, None, data_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder, len(vocab), units=predefined_args['''units'''], embed_size=predefined_args['''embed_size'''], embed_dropout=predefined_args['''embed_dropout'''], word_embed=predefined_args['''word_embed'''], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args['''token_type_vocab_size'''], use_classifier=False, use_decoder=False, )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(vocab),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
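    # For example (substituting layer index 0 into the table above), the Gluon key
    # `encoder.transformer_cells.0.attention_cell.proj_key.weight` ends up as
    # `bert.encoder.layer.0.attention.self.key.weight` on the Transformers side.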
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
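    # Illustrative use of the helper (hypothetical shapes): mapping a (2, 3) HF
    # parameter only succeeds if the Gluon tensor behind the given key converts,
    # via to_torch, to the same (2, 3) shape; any mismatch trips the assert above.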
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, '''word_embed.0.weight''' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, '''encoder.position_weight''' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, '''encoder.layer_norm.beta''' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, '''encoder.layer_norm.gamma''' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
# Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[] )
# Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='''pt''' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1E-3 )
    if success:
        print('''✔️ Both models output the same tensors''' )
    else:
        print('''❌ Both models do **NOT** output the same tensors''' )
        print('''Absolute difference is:''', max_absolute_diff )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 694 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=3 , lowerCamelCase=32 , lowerCamelCase=3 , lowerCamelCase=10 , lowerCamelCase=[10, 20, 30, 40] , lowerCamelCase=[1, 1, 2, 1] , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="relu" , lowerCamelCase=3 , lowerCamelCase=None , ) -> Dict:
"""simple docstring"""
snake_case__ : str = parent
snake_case__ : int = batch_size
snake_case__ : Optional[Any] = image_size
snake_case__ : Any = num_channels
snake_case__ : Dict = embeddings_size
snake_case__ : Union[str, Any] = hidden_sizes
snake_case__ : Tuple = depths
snake_case__ : Tuple = is_training
snake_case__ : int = use_labels
snake_case__ : int = hidden_act
snake_case__ : Any = num_labels
snake_case__ : Union[str, Any] = scope
snake_case__ : Optional[int] = len(lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[str] = FlaxRegNetModel(config=lowerCamelCase )
snake_case__ : Dict = model(lowerCamelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : int = self.num_labels
snake_case__ : int = FlaxRegNetForImageClassification(config=lowerCamelCase )
snake_case__ : Optional[int] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowercase__ ( self ) -> None:
"""simple docstring"""
snake_case__ : str = FlaxRegNetModelTester(self )
snake_case__ : List[Any] = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(lowerCamelCase )
snake_case__ : Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ : List[str] = model_class(lowerCamelCase )
snake_case__ : Tuple = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ : str = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[Any] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ : Any = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model_class(lowerCamelCase )
@jax.jit
def model_jitted(lowerCamelCase , **lowerCamelCase ):
return model(pixel_values=lowerCamelCase , **lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
snake_case__ : List[str] = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
snake_case__ : int = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
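def _sketch_jit_equivalence():
    # Illustrative helper (hypothetical name, not a collected test): the JIT check
    # above relies on a jax.jit-compiled function matching its eager counterpart.
    def double_plus_one(x):
        return x * 2 + 1
    x = jnp.ones((2, 2))
    assert jnp.allclose(jax.jit(double_plus_one)(x), double_plus_one(x))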
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[str] = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
snake_case__ : Dict = self.default_image_processor
snake_case__ : str = prepare_img()
snake_case__ : Dict = image_processor(images=lowerCamelCase , return_tensors='''np''' )
snake_case__ : Any = model(**lowerCamelCase )
# verify the logits
snake_case__ : Dict = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCamelCase )
snake_case__ : List[Any] = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
| 694 |
'''simple docstring'''
def solution(n: int = 4_00_00_00) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
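    # Hand-checked examples on small limits: the even Fibonacci numbers up to 10
    # are 2 and 8 (sum 10); up to 100 they are 2, 8 and 34 (sum 44).
    assert solution(10) == 10
    assert solution(100) == 44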
| 694 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=3 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = parent
snake_case__ : Tuple = batch_size
snake_case__ : int = seq_length
snake_case__ : List[str] = is_training
snake_case__ : List[Any] = use_input_mask
snake_case__ : str = use_token_type_ids
snake_case__ : Optional[int] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Dict = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : Tuple = hidden_act
snake_case__ : Dict = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : Optional[Any] = max_position_embeddings
snake_case__ : Optional[int] = type_vocab_size
snake_case__ : str = type_sequence_label_size
snake_case__ : str = initializer_range
snake_case__ : List[str] = num_labels
snake_case__ : Dict = num_choices
snake_case__ : Union[str, Any] = scope
def lowercase__ ( self ) -> int:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowerCamelCase , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
"""simple docstring"""
snake_case__ : Union[str, Any] = FalconModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : int = model(lowerCamelCase , attention_mask=lowerCamelCase )
snake_case__ : Dict = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : int = True
snake_case__ : Any = FalconModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Optional[Any] = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
snake_case__ : Optional[int] = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
snake_case__ : List[str] = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Tuple:
"""simple docstring"""
snake_case__ : Union[str, Any] = FalconForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Optional[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int = True
snake_case__ : int = True
snake_case__ : str = FalconForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# first forward pass
snake_case__ : List[Any] = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
snake_case__ : Any = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention_mask
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ : int = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )['''hidden_states'''][0]
snake_case__ : Any = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )['''hidden_states'''][0]
# select random slice
snake_case__ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def lowercase__ ( self ) -> int:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (FalconForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase = (
{
'feature-extraction': FalconModel,
'text-classification': FalconForSequenceClassification,
'text-generation': FalconForCausalLM,
'question-answering': FalconForQuestionAnswering,
'token-classification': FalconForTokenClassification,
'zero-shot': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : int = FalconModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs )
def lowercase__ ( self ) -> int:
"""simple docstring"""
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = 3
snake_case__ : Optional[Any] = input_dict['''input_ids''']
snake_case__ : Dict = input_ids.ne(1 ).to(lowerCamelCase )
snake_case__ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ : Union[str, Any] = FalconForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Union[str, Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self ) -> int:
"""simple docstring"""
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = 3
snake_case__ : Dict = '''single_label_classification'''
snake_case__ : int = input_dict['''input_ids''']
snake_case__ : Tuple = input_ids.ne(1 ).to(lowerCamelCase )
snake_case__ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ : List[str] = FalconForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : str = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[Any] = input_dict['''input_ids''']
snake_case__ : int = FalconForCausalLM(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Union[str, Any] = model(lowerCamelCase , use_cache=lowerCamelCase )
snake_case__ : Any = input_ids.shape[0]
snake_case__ : str = model._convert_to_rw_cache(result.past_key_values )
snake_case__ : List[str] = model._convert_cache_to_standard_format(lowerCamelCase , lowerCamelCase )
for layer in range(len(lowerCamelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
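    def _sketch_cache_shapes(self):
        # Illustrative sketch (hypothetical sizes and helper name, not a collected
        # test): the standard layout checked above is a 4-D tensor of shape
        # [batch, heads, seq, head_dim], while the RW layout is 3-D, fusing batch
        # and heads into the leading axis (axis order illustrative).
        standard = torch.zeros(2, 4, 7, 16)
        rw = standard.reshape(2 * 4, 7, 16)
        self.assertTrue(standard.ndim == 4 and rw.ndim == 3)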
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = 3
snake_case__ : List[str] = '''multi_label_classification'''
snake_case__ : str = input_dict['''input_ids''']
snake_case__ : Tuple = input_ids.ne(1 ).to(lowerCamelCase )
snake_case__ : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ : Union[str, Any] = FalconForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Dict = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config, '''use_cache''' ):
                return
            model = model_class(config ).to(torch_device )
            if "use_cache" not in inputs:
                inputs['''use_cache'''] = True
            outputs = model(**inputs )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
            num_hidden_layers = (
                getattr(config, '''decoder_layers''', None )
                or getattr(config, '''num_decoder_layers''', None )
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, '''num_kv_heads''', config.num_attention_heads )
            embed_dim = getattr(config, '''d_model''', config.hidden_size )
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs['''past_key_values''']
            self.assertEqual(len(past_kv ), num_hidden_layers )
            batch_size, seq_length = inputs['''input_ids'''].shape
            for i in range(num_hidden_layers ):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : int = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
snake_case__ : str = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(lowerCamelCase )
snake_case__ : Any = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCamelCase )
snake_case__ : str = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
snake_case__ : Dict = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=19 )
snake_case__ : List[Any] = tokenizer.batch_decode(lowerCamelCase )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
@slow
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
snake_case__ : Union[str, Any] = FalconForCausalLM.from_pretrained(lowerCamelCase )
model.eval()
model.to(lowerCamelCase )
snake_case__ : List[Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCamelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=4 )
model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=4 )
model.generate(**lowerCamelCase , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase )
snake_case__ : List[str] = FalconForCausalLM.from_pretrained(lowerCamelCase )
model.eval()
model.to(device=lowerCamelCase )
snake_case__ : Dict = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCamelCase )
# Test results are the same with and without cache
snake_case__ : Dict = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=20 , use_cache=lowerCamelCase )
snake_case__ : List[Any] = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=20 , use_cache=lowerCamelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : int = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
_lowerCAmelCase : Optional[int] = {
"google/pegasus-xsum": 5_1_2,
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PegasusTokenizer
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is'''
f''' {type(lowerCamelCase )}''' )
snake_case__ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case__ : List[Any] = additional_special_tokens_extended
else:
snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Union[str, Any] = vocab_file
snake_case__ : List[Any] = False if not self.vocab_file else True
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : int = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
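# Illustration of build_inputs_with_special_tokens above (hypothetical token ids;
# assumes the usual Pegasus convention of pad_token_id == 0 and eos_token_id == 1):
#   single sequence: [52, 937]    -> [52, 937, 1]
#   sequence pair:   [52] + [937] -> [52, 937, 1]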
| 694 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_image(image):
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
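if __name__ == "__main__":
    # Minimal usage sketch of the helpers above (illustrative; assumes a working
    # CPU build of torch).
    demo_model = torch.nn.Linear(4, 2)
    freeze_module(demo_model)
    assert not any(p.requires_grad for p in demo_model.parameters())
    print(get_device(), get_timestamp())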
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = 1.0 if scale is None else scale
snake_case__ : Dict = 0.0 if loc is None else loc
super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] )
@property
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
return self.variance.sqrt()
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = args_dim
snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] )
snake_case__ : Optional[int] = domain_map
def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]:
"""simple docstring"""
snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj]
return self.domain_map(*lowerCamelCase )
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Tuple = function
def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.function(lowerCamelCase , *lowerCamelCase )
class snake_case :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
def __init__( self , lowerCamelCase = 1 ) -> None:
"""simple docstring"""
snake_case__ : Optional[Any] = dim
snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*lowerCamelCase )
else:
return Independent(self.distribution_class(*lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution:
"""simple docstring"""
snake_case__ : List[Any] = self._base_distribution(lowerCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim )
@property
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return len(self.event_shape )
@property
def lowercase__ ( self ) -> float:
"""simple docstring"""
return 0.0
def lowercase__ ( self , lowerCamelCase ) -> nn.Module:
"""simple docstring"""
return ParameterProjection(
in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowercase__ ( self , *lowerCamelCase ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def lowercase__ ( lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
_lowerCAmelCase = StudentT
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"loc": 1, "scale": 1}
_lowerCAmelCase = Normal
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"total_count": 1, "logits": 1}
_lowerCAmelCase = NegativeBinomial
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowercase__ ( self , lowerCamelCase ) -> Distribution:
"""simple docstring"""
        total_count, logits = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution:
"""simple docstring"""
        total_count, logits = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
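if __name__ == "__main__":
    # Illustrative check of the affine re-parameterization used by the transformed
    # distribution at the top of this file (arbitrary values): AffineTransform with
    # loc=2 and scale=3 maps a base sample x to 2 + 3 * x.
    transform = AffineTransform(loc=2.0, scale=3.0)
    assert transform(torch.tensor(1.0)) == torch.tensor(5.0)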
| 694 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
return new_state_dict
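def _sketch_backbone_rename():
    # Toy illustration of rename_backbone_keys (hypothetical keys and values):
    # backbone weights are re-homed under the conv encoder, other keys pass through.
    toy = OrderedDict({'''backbone.0.body.conv1.weight''': 0, '''class_embed.bias''': 1})
    renamed = rename_backbone_keys(toy)
    assert '''backbone.conv_encoder.model.conv1.weight''' in renamed
    assert '''class_embed.bias''' in renamed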
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''''''
    if is_panoptic:
        prefix = '''conditional_detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_56, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_56]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_56:5_12, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_56:5_12]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_56:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_56:]
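def _sketch_qkv_split():
    # Illustration (hypothetical tensor and helper name, not part of the
    # conversion): how one packed (3 * 256, 256) in_proj matrix splits into the
    # query/key/value slices used in read_in_q_k_v above.
    packed = torch.randn(3 * 2_56, 2_56)
    q_w, k_w, v_w = packed[:2_56, :], packed[2_56:5_12, :], packed[-2_56:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (2_56, 2_56)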
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    config = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case__ : Optional[int] = '''resnet101'''
if "dc5" in model_name:
snake_case__ : List[Any] = True
snake_case__ : Dict = '''panoptic''' in model_name
if is_panoptic:
snake_case__ : Union[str, Any] = 2_50
else:
snake_case__ : str = 91
snake_case__ : Tuple = '''huggingface/label-files'''
snake_case__ : Dict = '''coco-detection-id2label.json'''
snake_case__ : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ : Tuple = {int(snake_case__ ): v for k, v in idalabel.items()}
snake_case__ : Tuple = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
# load image processor
snake_case__ : Optional[int] = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
snake_case__ : int = ConditionalDetrImageProcessor(format=snake_case__ )
# prepare image
snake_case__ : Dict = prepare_img()
snake_case__ : int = image_processor(images=snake_case__ , return_tensors='''pt''' )
snake_case__ : Any = encoding['''pixel_values''']
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
snake_case__ : Union[str, Any] = torch.hub.load('''DeppMeng/ConditionalDETR''' , snake_case__ , pretrained=snake_case__ ).eval()
snake_case__ : str = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case__ : Optional[Any] = '''conditional_detr.''' + src
rename_key(snake_case__ , snake_case__ , snake_case__ )
snake_case__ : Dict = rename_backbone_keys(snake_case__ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case__ , is_panoptic=snake_case__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : Tuple = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
snake_case__ : int = state_dict.pop(snake_case__ )
snake_case__ : str = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : str = state_dict.pop(snake_case__ )
snake_case__ : List[str] = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
snake_case__ : Any = state_dict.pop(snake_case__ )
snake_case__ : Union[str, Any] = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
snake_case__ : Dict = state_dict.pop(snake_case__ )
snake_case__ : Optional[Any] = val
# finally, create HuggingFace model and load state dict
snake_case__ : int = ConditionalDetrForSegmentation(snake_case__ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
model.push_to_hub(repo_id=snake_case__ , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
snake_case__ : Optional[Any] = conditional_detr(snake_case__ )
snake_case__ : List[Any] = model(snake_case__ )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 694 |
'''simple docstring'''
from math import factorial
def _A ( snake_case__ : int = 20 ):
snake_case__ : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
snake_case__ : Union[str, Any] = n // 2
return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) )
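# Worked example: for n = 2 the formula gives factorial(4) / (factorial(2) ** 2) = 6,
# the number of monotone lattice paths through a 2 x 2 grid.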
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
_lowerCAmelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : int = 1 , snake_case__ : int = 10_00 ):
snake_case__ : Union[str, Any] = 1
snake_case__ : List[str] = 0
for divide_by_number in range(snake_case__ , digit + 1 ):
snake_case__ : list[int] = []
snake_case__ : Any = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(snake_case__ ):
snake_case__ : Any = len(snake_case__ )
snake_case__ : Union[str, Any] = divide_by_number
else:
has_been_divided.append(snake_case__ )
snake_case__ : str = now_divide * 10 % divide_by_number
return the_digit
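# Example: for digit = 10 the longest recurring decimal cycle of 1/d occurs at
# d = 7 (1/7 = 0.142857142857...), so the function returns 7.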
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (EulerDiscreteScheduler,)
_lowerCAmelCase = 1_0
def lowercase__ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase )
return config
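    # Note: 1100 train timesteps (rather than the usual diffusers default of 1000)
    # gives these tests a non-default schedule length to exercise.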
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Any = self.get_scheduler_config()
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Any = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : int = model(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : Optional[int] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Union[str, Any] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Tuple = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : int = output.prev_sample
snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Dict = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Optional[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
| 694 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _A ( snake_case__ : List[Any] , snake_case__ : int=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def _A ( snake_case__ : List[Any] , snake_case__ : List[Any]=0 ):
snake_case__ : Union[str, Any] = []
for old_item in old_list:
snake_case__ : int = old_item.replace('''in_layers.0''' , '''norm1''' )
snake_case__ : List[str] = new_item.replace('''in_layers.2''' , '''conv1''' )
snake_case__ : int = new_item.replace('''out_layers.0''' , '''norm2''' )
snake_case__ : Optional[Any] = new_item.replace('''out_layers.3''' , '''conv2''' )
snake_case__ : Optional[Any] = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
snake_case__ : Union[str, Any] = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
snake_case__ : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
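# Example rename produced above (for a typical LDM resnet key, illustrative):
#   "input_blocks.1.0.in_layers.0.weight" -> "input_blocks.1.0.norm1.weight"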
def _A ( snake_case__ : Any , snake_case__ : Optional[int]=0 ):
snake_case__ : Tuple = []
for old_item in old_list:
snake_case__ : Dict = old_item
snake_case__ : Dict = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
snake_case__ : Optional[Any] = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
snake_case__ : Union[str, Any] = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
snake_case__ : Tuple = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
snake_case__ : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def _A ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=None , snake_case__ : Dict=None ):
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case__ : Union[str, Any] = old_checkpoint[path]
snake_case__ : Tuple = old_tensor.shape[0] // 3
snake_case__ : List[str] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case__ : List[str] = old_tensor.shape[0] // config['''num_head_channels'''] // 3
snake_case__ : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case__ ,snake_case__ ,snake_case__ : int = old_tensor.split(channels // num_heads , dim=1 )
snake_case__ : Union[str, Any] = query.reshape(snake_case__ )
snake_case__ : Optional[Any] = key.reshape(snake_case__ )
snake_case__ : Optional[int] = value.reshape(snake_case__ )
for path in paths:
snake_case__ : str = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case__ : Union[str, Any] = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
snake_case__ : Optional[Any] = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
snake_case__ : Optional[int] = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case__ : List[Any] = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case__ : List[str] = old_checkpoint[path['''old''']][:, :, 0]
else:
snake_case__ : str = old_checkpoint[path['''old''']]
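# Note: the "[:, :, 0]" slice above turns a 1x1 Conv1d weight of shape
# (out_channels, in_channels, 1) into an equivalent Linear weight of shape
# (out_channels, in_channels).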
def _A ( snake_case__ : Optional[Any] , snake_case__ : Tuple ):
snake_case__ : Optional[int] = {}
snake_case__ : List[Any] = checkpoint['''time_embed.0.weight''']
snake_case__ : Union[str, Any] = checkpoint['''time_embed.0.bias''']
snake_case__ : Dict = checkpoint['''time_embed.2.weight''']
snake_case__ : Optional[int] = checkpoint['''time_embed.2.bias''']
snake_case__ : List[Any] = checkpoint['''input_blocks.0.0.weight''']
snake_case__ : Optional[Any] = checkpoint['''input_blocks.0.0.bias''']
snake_case__ : str = checkpoint['''out.0.weight''']
snake_case__ : List[str] = checkpoint['''out.0.bias''']
snake_case__ : Dict = checkpoint['''out.2.weight''']
snake_case__ : Tuple = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
snake_case__ : Tuple = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
snake_case__ : List[str] = {
layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
snake_case__ : List[Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
snake_case__ : List[str] = {
layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
snake_case__ : List[str] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
snake_case__ : str = {
layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
snake_case__ : Any = (i - 1) // (config['''num_res_blocks'''] + 1)
snake_case__ : List[str] = (i - 1) % (config['''num_res_blocks'''] + 1)
snake_case__ : Union[str, Any] = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key]
snake_case__ : Dict = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key]
if f'''input_blocks.{i}.0.op.weight''' in checkpoint:
snake_case__ : Optional[Any] = checkpoint[
f'''input_blocks.{i}.0.op.weight'''
]
snake_case__ : Optional[int] = checkpoint[
f'''input_blocks.{i}.0.op.bias'''
]
continue
snake_case__ : Dict = renew_resnet_paths(snake_case__ )
snake_case__ : int = {'''old''': f'''input_blocks.{i}.0''', '''new''': f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
snake_case__ : Optional[Any] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
snake_case__ : Any = renew_attention_paths(snake_case__ )
snake_case__ : Tuple = {
'''old''': f'''input_blocks.{i}.1''',
'''new''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
snake_case__ : Dict = {
f'''input_blocks.{i}.1.qkv.bias''': {
'''key''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'''query''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'''value''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
f'''input_blocks.{i}.1.qkv.weight''': {
'''key''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'''query''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'''value''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
snake_case__ : Dict = middle_blocks[0]
snake_case__ : Optional[Any] = middle_blocks[1]
snake_case__ : List[str] = middle_blocks[2]
snake_case__ : Union[str, Any] = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
snake_case__ : Optional[Any] = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
snake_case__ : Tuple = renew_attention_paths(snake_case__ )
snake_case__ : Dict = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
snake_case__ : Optional[int] = i // (config['''num_res_blocks'''] + 1)
snake_case__ : str = i % (config['''num_res_blocks'''] + 1)
snake_case__ : str = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
snake_case__ : str = {}
for layer in output_block_layers:
snake_case__ ,snake_case__ : Union[str, Any] = layer.split('''.''' )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
snake_case__ : Union[str, Any] = [layer_name]
if len(snake_case__ ) > 1:
snake_case__ : Tuple = [key for key in output_blocks[i] if f'''output_blocks.{i}.0''' in key]
snake_case__ : Optional[Any] = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key]
snake_case__ : Optional[int] = renew_resnet_paths(snake_case__ )
snake_case__ : Tuple = renew_resnet_paths(snake_case__ )
snake_case__ : Any = {'''old''': f'''output_blocks.{i}.0''', '''new''': f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case__ : List[str] = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
snake_case__ : int = checkpoint[
f'''output_blocks.{i}.{index}.conv.weight'''
]
snake_case__ : List[str] = checkpoint[
f'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
snake_case__ : Optional[int] = []
if len(snake_case__ ):
snake_case__ : Any = renew_attention_paths(snake_case__ )
snake_case__ : Optional[Any] = {
'''old''': f'''output_blocks.{i}.1''',
'''new''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
snake_case__ : List[Any] = {
f'''output_blocks.{i}.1.qkv.bias''': {
'''key''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'''query''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'''value''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
f'''output_blocks.{i}.1.qkv.weight''': {
'''key''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'''query''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'''value''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=snake_case__ , )
else:
snake_case__ : Tuple = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case__ : Tuple = '''.'''.join(['''output_blocks''', str(snake_case__ ), path['''old''']] )
snake_case__ : Optional[Any] = '''.'''.join(['''up_blocks''', str(snake_case__ ), '''resnets''', str(snake_case__ ), path['''new''']] )
snake_case__ : int = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
_lowerCAmelCase : str = parser.parse_args()
_lowerCAmelCase : str = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowerCAmelCase : Optional[Any] = json.loads(f.read())
_lowerCAmelCase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowerCAmelCase : Union[str, Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowerCAmelCase : List[str] = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
_lowerCAmelCase : List[Any] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
_lowerCAmelCase : Optional[int] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 694 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = ['pixel_values']
def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : int = do_resize
snake_case__ : Dict = do_rescale
snake_case__ : Any = size_divisor
snake_case__ : str = resample
super().__init__(**lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
snake_case__ : Any = height // size_divisor * size_divisor
snake_case__ : Union[str, Any] = width // size_divisor * size_divisor
snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
return image
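    # Example: with size_divisor=32 an input of 1080 x 1920 pixels is resized to
    # 1056 x 1920 (each dimension rounded down to the nearest multiple of 32).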
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature:
"""simple docstring"""
snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor
snake_case__ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images]
if do_resize:
snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images]
snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
snake_case__ : str = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 694 | 1 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _A ( snake_case__ : Any , snake_case__ : List[str] ):
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
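# The checks above assume the shared JSONL fixture used throughout these tests:
# 4 rows with columns col_1 (string), col_2 (int64) and col_3 (float64).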
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _A ( snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Tuple ):
snake_case__ : Dict = tmp_path / '''cache'''
snake_case__ : Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ : Tuple = JsonDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _A ( snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Optional[int] ):
snake_case__ : List[str] = tmp_path / '''cache'''
snake_case__ : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : Tuple = features.copy() if features else default_expected_features
snake_case__ : Tuple = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : Any = JsonDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def _A ( snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Tuple ):
snake_case__ : Optional[int] = tmp_path / '''cache'''
snake_case__ : Dict = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
snake_case__ : Optional[Any] = features.copy() if features else default_expected_features
snake_case__ : Optional[int] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : str = JsonDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _A ( snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
snake_case__ : Tuple = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
snake_case__ : List[str] = features.copy()
snake_case__ : int = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : List[Any] = tmp_path / '''cache'''
snake_case__ : int = JsonDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _A ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any ):
snake_case__ : List[str] = tmp_path / '''cache'''
snake_case__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : int = JsonDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _A ( snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int ):
if issubclass(snake_case__ , snake_case__ ):
snake_case__ : Any = jsonl_path
elif issubclass(snake_case__ , snake_case__ ):
snake_case__ : List[str] = [jsonl_path]
snake_case__ : Tuple = tmp_path / '''cache'''
snake_case__ : Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : Any = JsonDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_json_dataset(snake_case__ , snake_case__ )
def _A ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
snake_case__ : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _A ( snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
snake_case__ : Optional[Any] = tmp_path / '''cache'''
snake_case__ : int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ : Dict = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_json_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _A ( snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
snake_case__ : Dict = tmp_path / '''cache'''
snake_case__ : Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : Tuple = features.copy() if features else default_expected_features
snake_case__ : str = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : Any = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_json_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _A ( snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
if split:
snake_case__ : Dict = {split: jsonl_path}
else:
snake_case__ : List[Any] = '''train'''
snake_case__ : Any = {'''train''': jsonl_path, '''test''': jsonl_path}
snake_case__ : Dict = tmp_path / '''cache'''
snake_case__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : Dict = JsonDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_json_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _A ( snake_case__ : Optional[int] ):
return json.load(snake_case__ )
def _A ( snake_case__ : Union[str, Any] ):
return [json.loads(snake_case__ ) for line in buffer]
class snake_case :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , lines=lowerCamelCase ).write()
buffer.seek(0 )
snake_case__ : Optional[Any] = load_json_function(lowerCamelCase )
assert isinstance(lowerCamelCase , lowerCamelCase )
assert isinstance(exported_content[0] , lowerCamelCase )
assert len(lowerCamelCase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , lines=lowerCamelCase , orient=lowerCamelCase ).write()
buffer.seek(0 )
snake_case__ : Optional[int] = load_json(lowerCamelCase )
assert isinstance(lowerCamelCase , lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , lines=lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
snake_case__ : List[Any] = load_json_function(lowerCamelCase )
assert isinstance(lowerCamelCase , lowerCamelCase )
assert isinstance(exported_content[0] , lowerCamelCase )
assert len(lowerCamelCase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , lines=lowerCamelCase , orient=lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
snake_case__ : Tuple = load_json(lowerCamelCase )
assert isinstance(lowerCamelCase , lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase ) == 10
def lowercase__ ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
with pytest.raises(lowerCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : List[Any] = tmp_path_factory.mktemp('''data''' ) / f'''test.json.{extension}'''
snake_case__ : int = str(shared_datadir / f'''test_file.json.{extension}''' )
JsonDatasetWriter(lowerCamelCase , lowerCamelCase , compression=lowerCamelCase ).write()
with fsspec.open(lowerCamelCase , '''rb''' , compression='''infer''' ) as f:
snake_case__ : Optional[Any] = f.read()
with fsspec.open(lowerCamelCase , '''rb''' , compression='''infer''' ) as f:
snake_case__ : Dict = f.read()
assert exported_content == original_content
| 694 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
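# Example: hf_hub_url('''org-name/dataset-name''', '''filename with blanks.csv''', '''v2''') yields
# https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv
# since quote() percent-encodes the space in the filename.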
| 694 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = {"facebook/bart-base": BartForConditionalGeneration}
_lowerCAmelCase : List[Any] = {"facebook/bart-base": BartTokenizer}
def _A ( ):
snake_case__ : List[Any] = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=snake_case__ , default=snake_case__ , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=snake_case__ , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=snake_case__ , default=snake_case__ , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case__ , )
parser.add_argument(
'''--config_name''' , type=snake_case__ , default=snake_case__ , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=snake_case__ , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=snake_case__ , default=snake_case__ , help='''Where to store the final ONNX file.''' )
snake_case__ : Dict = parser.parse_args()
return args
def _A ( snake_case__ : List[str] , snake_case__ : Optional[Any]="cpu" ):
snake_case__ : List[str] = model_dict[model_name].from_pretrained(snake_case__ ).to(snake_case__ )
snake_case__ : Optional[int] = tokenizer_dict[model_name].from_pretrained(snake_case__ )
if model_name in ["facebook/bart-base"]:
snake_case__ : Tuple = 0
snake_case__ : List[Any] = None
snake_case__ : List[Any] = 0
return huggingface_model, tokenizer
def _A ( snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Any ):
model.eval()
snake_case__ : str = None
snake_case__ : str = torch.jit.script(BARTBeamSearchGenerator(snake_case__ ) )
with torch.no_grad():
snake_case__ : Dict = '''My friends are cool but they eat too many carbs.'''
snake_case__ : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors='''pt''' ).to(model.device )
snake_case__ : str = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=snake_case__ , max_length=snake_case__ , early_stopping=snake_case__ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
snake_case__ , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , snake_case__ , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=snake_case__ , )
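        # dynamic_axes above marks the batch and sequence dimensions as symbolic,
        # so the exported graph accepts arbitrary batch sizes and sequence lengths.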
logger.info('''Model exported to {}'''.format(snake_case__ ) )
snake_case__ : Optional[Any] = remove_dup_initializers(os.path.abspath(snake_case__ ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(snake_case__ ) )
snake_case__ : List[str] = onnxruntime.InferenceSession(snake_case__ )
snake_case__ : Tuple = ort_sess.run(
snake_case__ , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(snake_case__ ),
'''max_length''': np.array(snake_case__ ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def _A ( ):
snake_case__ : Optional[int] = parse_args()
snake_case__ : int = 5
snake_case__ : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
snake_case__ : Optional[Any] = torch.device(args.device )
snake_case__ ,snake_case__ : Optional[int] = load_model_tokenizer(args.model_name_or_path , snake_case__ )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(snake_case__ )
if args.max_length:
snake_case__ : int = args.max_length
if args.num_beams:
snake_case__ : Optional[int] = args.num_beams
if args.output_file_path:
snake_case__ : int = args.output_file_path
else:
snake_case__ : Union[str, Any] = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
        raise ValueError('''Exactly one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
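# Example: with voltage=0, current=2, power=4 the function solves P = V * I for
# the missing quantity and returns result(name='voltage', value=2.0).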
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : list[list] ):
snake_case__ : Any = current_set.copy()
for row_index, row in enumerate(snake_case__ ):
snake_case__ : Union[str, Any] = row[0]
for column_index, column in enumerate(snake_case__ ):
if magnitude == 0:
snake_case__ : Optional[Any] = column
continue
snake_case__ : Union[str, Any] = column / magnitude
# Subtract to cancel term
snake_case__ : Union[str, Any] = current_set[0]
snake_case__ : Tuple = [first_row]
snake_case__ : Dict = current_set[1::]
for row in current_set:
snake_case__ : Any = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(snake_case__ )
continue
for column_index in range(len(snake_case__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(snake_case__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
snake_case__ : Optional[Any] = final_set[0]
snake_case__ : Optional[int] = []
snake_case__ : str = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
snake_case__ : Optional[Any] = simplify(snake_case__ )
for i in range(len(snake_case__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , snake_case__ )
snake_case__ : Dict = resultant
return final_set
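# Note: simplify() performs one round of Gaussian elimination -- each row is
# normalised by its leading coefficient, replaced by its difference from the
# first row to cancel the leading variable, and the procedure recurses on the
# remaining sub-system.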
def _A ( snake_case__ : list[list] ):
if len(snake_case__ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
snake_case__ : int = len(snake_case__ ) + 1
if any(len(snake_case__ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(snake_case__ , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(snake_case__ ) == 1:
return [equations[0][-1] / equations[0][0]]
snake_case__ : Optional[int] = equations.copy()
if any(0 in row for row in data_set ):
snake_case__ : List[str] = data_set.copy()
snake_case__ : Optional[int] = []
for row_index, row in enumerate(snake_case__ ):
if 0 not in row:
snake_case__ : int = data_set.pop(snake_case__ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , snake_case__ )
snake_case__ : Union[str, Any] = data_set.copy()
snake_case__ : Dict = simplify(snake_case__ )
snake_case__ : str = simplified[::-1]
snake_case__ : list = []
for row in simplified:
snake_case__ : Any = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
snake_case__ : Dict = row.copy()[: len(snake_case__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(snake_case__ ) == 0:
solutions.append(0 )
continue
snake_case__ : Tuple = temp_row[1::]
snake_case__ : Dict = temp_row[::-1]
for column_index, column in enumerate(snake_case__ ):
current_solution -= column * solutions[column_index]
solutions.append(snake_case__ )
snake_case__ : List[Any] = []
for item in solutions:
final.append(float(round(snake_case__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Dict = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 694 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , snake_case__ )
def _A ( snake_case__ : List[str] , snake_case__ : Dict ):
snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' )
with open(snake_case__ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case__ )
snake_case__ : int = get_imports(snake_case__ )
assert parsed_imports == ["os"]
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : int , snake_case__ : int ):
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
snake_case__ : Any = str(bin(snake_case__ ) )
binary_number += "0" * shift_amount
return binary_number
def _A ( snake_case__ : int , snake_case__ : int ):
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
snake_case__ : Optional[int] = str(bin(snake_case__ ) )[2:]
if shift_amount >= len(snake_case__ ):
return "0b0"
snake_case__ : Tuple = binary_number[: len(snake_case__ ) - shift_amount]
return "0b" + shifted_binary_number
def _A ( snake_case__ : int , snake_case__ : int ):
if number >= 0: # Get binary representation of positive number
snake_case__ : Dict = '''0''' + str(bin(snake_case__ ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
        snake_case__ : Optional[Any] = len(bin(snake_case__ )[3:] ) # bit length of the binary representation
        snake_case__ : str = bin(abs(snake_case__ ) - (1 << binary_number_length) )[3:] # Find 2's complement of number
snake_case__ : Tuple = (
'''1''' + '''0''' * (binary_number_length - len(snake_case__ )) + binary_number
)
if shift_amount >= len(snake_case__ ):
return "0b" + binary_number[0] * len(snake_case__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(snake_case__ ) - shift_amount]
)
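# Examples: logically right-shifting 8 by 2 gives "0b10" (8 >> 2 == 2), while
# arithmetically right-shifting -8 by 2 gives "0b11110", the 5-bit two's
# complement representation of -2.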
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'markuplm'
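    # Note: the xpath-related fields below (max_depth, tag/subscript unit vocab
    # sizes, pad ids) configure MarkupLM's XPath embeddings, which encode each
    # token's position in the HTML DOM tree on top of a RoBERTa-style encoder.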
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Optional[int] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : List[str] = initializer_range
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Optional[Any] = position_embedding_type
snake_case__ : Dict = use_cache
snake_case__ : int = classifier_dropout
# additional properties
snake_case__ : Union[str, Any] = max_depth
snake_case__ : Dict = max_xpath_tag_unit_embeddings
snake_case__ : Any = max_xpath_subs_unit_embeddings
snake_case__ : int = tag_pad_id
snake_case__ : Tuple = subs_pad_id
snake_case__ : Dict = xpath_unit_hidden_size
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : int = 10 , snake_case__ : int = 22 ):
snake_case__ : Union[str, Any] = range(1 , snake_case__ )
snake_case__ : Optional[Any] = range(1 , snake_case__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
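# Example: 9 ** 5 = 59049 is itself a 5-digit number, so it is counted; the
# default power bound of 22 is safe because even 9 ** 22 has only 21 digits.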
if __name__ == "__main__":
print(F'''{solution(1_0, 2_2) = }''')
| 694 |
'''simple docstring'''
def _A ( snake_case__ : float ):
return 10 - x * x
def _A ( snake_case__ : float , snake_case__ : float ):
    # Bolzano's theorem: if equation(a) and equation(b) have opposite signs,
    # a root is guaranteed somewhere in [a, b]
    if equation(snake_case__ ) * equation(snake_case__ ) >= 0:
        raise ValueError('''equation(a) and equation(b) must have opposite signs''' )
snake_case__ : List[str] = a
while (b - a) >= 0.01:
# Find middle point
snake_case__ : Optional[int] = (a + b) / 2
# Check if middle point is root
if equation(snake_case__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case__ ) * equation(snake_case__ ) < 0:
snake_case__ : Dict = c
else:
snake_case__ : List[str] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
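    # --- Hedged check (illustrative addition): the root of 10 - x*x on [0, 6]
    # is sqrt(10) ~= 3.1623; the loop above stops once the bracket is narrower
    # than 0.01, so only about that tolerance can be expected.
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01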
| 694 | 1 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
def _A ( snake_case__ : List[Any]=None , snake_case__ : Any=None ):
return field(default_factory=lambda: default , metadata=snake_case__ )
@dataclass
class snake_case :
"""simple docstring"""
_lowerCAmelCase = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
_lowerCAmelCase = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
_lowerCAmelCase = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'Use FP16 to accelerate inference.'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'Benchmark training of model'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'Verbose memory tracing'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'Trace memory line by line'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'Save result to a CSV file'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'Save all print statements in a log file'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'Whether to print environment information'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
_lowerCAmelCase = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
_lowerCAmelCase = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
_lowerCAmelCase = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
_lowerCAmelCase = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
_lowerCAmelCase = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving environment information.'} , )
_lowerCAmelCase = field(
default=F'''log_{round(time() )}.csv''' , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
_lowerCAmelCase = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
warnings.warn(
f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , lowerCamelCase , )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 694 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : list[float] , snake_case__ : list[float] ):
snake_case__ : Dict = sorted(numsa + numsa )
snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
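# --- Hedged cross-check (illustrative addition): the median of two merged
# lists, mirroring the divmod logic above on a concrete input and compared
# against statistics.median.
def _median_demo() -> None:
    import statistics

    merged = sorted([1.0, 3.0] + [2.0, 4.0])
    div, mod = divmod(len(merged), 2)
    manual = merged[div] if mod == 1 else (merged[div] + merged[div - 1]) / 2
    assert manual == statistics.median(merged) == 2.5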
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 694 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_lowerCAmelCase : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])")
_lowerCAmelCase : int = re.compile(R"([a-z\d])([A-Z])")
_lowerCAmelCase : Tuple = re.compile(R"(?<!_)_(?!_)")
_lowerCAmelCase : Dict = re.compile(R"(_{2,})")
_lowerCAmelCase : List[Any] = R"^\w+(\.\w+)*$"
_lowerCAmelCase : Dict = R"<>:/\|?*"
def _A ( snake_case__ : Any ):
snake_case__ : Any = _uppercase_uppercase_re.sub(R'''\1_\2''' , snake_case__ )
snake_case__ : Optional[int] = _lowercase_uppercase_re.sub(R'''\1_\2''' , snake_case__ )
return name.lower()
def _A ( snake_case__ : Dict ):
snake_case__ : List[str] = _single_underscore_re.split(snake_case__ )
snake_case__ : str = [_multiple_underscores_re.split(snake_case__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(snake_case__ ) if n != '''''' )
def _A ( snake_case__ : Optional[Any] ):
if os.path.basename(snake_case__ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(snake_case__ )
def _A ( snake_case__ : str , snake_case__ : List[str] ):
if os.path.basename(snake_case__ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , snake_case__ ):
        raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(snake_case__ )}-{split}'''
def _A ( snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : int , snake_case__ : str=None ):
snake_case__ : str = filename_prefix_for_split(snake_case__ , snake_case__ )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
snake_case__ : Tuple = os.path.join(snake_case__ , snake_case__ )
return f'''{filepath}*'''
def _A ( snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Optional[Any]=None , snake_case__ : Dict=None ):
snake_case__ : str = filename_prefix_for_split(snake_case__ , snake_case__ )
snake_case__ : Tuple = os.path.join(snake_case__ , snake_case__ )
if shard_lengths:
snake_case__ : str = len(snake_case__ )
snake_case__ : Tuple = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(snake_case__ )]
if filetype_suffix:
snake_case__ : Tuple = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
snake_case__ : int = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
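# --- Hedged illustration (addition, not in the original module): the shard
# filename format the helpers above produce for a 2-shard "train" split of a
# dataset named "squad", written out directly.
_demo_prefix = os.path.join("/tmp/cache", "squad-train")
_demo_shards = [f"{_demo_prefix}-{shard_id:05d}-of-{2:05d}.arrow" for shard_id in range(2)]
# -> ['/tmp/cache/squad-train-00000-of-00002.arrow',
#     '/tmp/cache/squad-train-00001-of-00002.arrow']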
| 694 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
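# --- Hedged sketch (illustrative addition): _LazyModule defers the heavy
# torch-backed imports above until an attribute is first touched. The same
# idea, hand-rolled with PEP 562's module-level __getattr__; the mapping below
# is a demo and not part of this module's public surface.
import importlib


def _demo_lazy_getattr(name: str):
    lazy_attrs = {"IBertModel": ".modeling_ibert"}
    if name in lazy_attrs:
        submodule = importlib.import_module(lazy_attrs[name], __name__)
        return getattr(submodule, name)  # resolved only on first access
    raise AttributeError(f"no lazy attribute {name!r}")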
| 694 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[Any] ):
# Initialise PyTorch model
snake_case__ : Any = RemBertConfig.from_json_file(snake_case__ )
print('''Building PyTorch model from configuration: {}'''.format(str(snake_case__ ) ) )
snake_case__ : List[str] = RemBertModel(snake_case__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(snake_case__ ) )
torch.save(model.state_dict() , snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 694 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'encoder-decoder'
_lowerCAmelCase = True
def __init__( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case__ : List[str] = kwargs.pop('''encoder''' )
snake_case__ : Any = encoder_config.pop('''model_type''' )
snake_case__ : List[str] = kwargs.pop('''decoder''' )
snake_case__ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : str = True
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case__ : Optional[int] = True
snake_case__ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case__ : List[Any] = self.encoder.to_dict()
snake_case__ : str = self.decoder.to_dict()
snake_case__ : Any = self.__class__.model_type
return output
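# --- Hedged usage sketch (illustrative addition): the classmethod above maps
# to the upstream `from_encoder_decoder_configs`; pairing two BERT configs is
# the canonical example. Assumes `transformers` is installed.
if __name__ == "__main__":
    from transformers import BertConfig, EncoderDecoderConfig

    combined = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
    assert combined.decoder.is_decoder and combined.decoder.add_cross_attention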
| 694 | 1 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_lowerCAmelCase : List[str] = "bert-base-cased"
_lowerCAmelCase : Optional[Any] = "google/pegasus-xsum"
_lowerCAmelCase : Optional[Any] = [" Sam ate lunch today.", "Sams lunch ingredients."]
_lowerCAmelCase : Any = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
_lowerCAmelCase : List[str] = "patrickvonplaten/t5-tiny-random"
_lowerCAmelCase : Dict = "sshleifer/bart-tiny-random"
_lowerCAmelCase : Optional[Any] = "sshleifer/tiny-mbart"
_lowerCAmelCase : Tuple = "sshleifer/tiny-marian-en-de"
def _A ( snake_case__ : Path , snake_case__ : list ):
snake_case__ : str = '''\n'''.join(snake_case__ )
Path(snake_case__ ).open('''w''' ).writelines(snake_case__ )
def _A ( snake_case__ : Tuple ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(snake_case__ , f'''{split}.source''' ) , snake_case__ )
_dump_articles(os.path.join(snake_case__ , f'''{split}.target''' ) , snake_case__ )
return tmp_dir
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowercase__ ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
snake_case__ : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
snake_case__ : Optional[int] = max(len(tokenizer.encode(lowerCamelCase ) ) for a in ARTICLES )
snake_case__ : List[Any] = max(len(tokenizer.encode(lowerCamelCase ) ) for a in SUMMARIES )
snake_case__ : Any = 4
snake_case__ : Tuple = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
snake_case__ ,snake_case__ : Optional[int] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
snake_case__ : Any = SeqaSeqDataset(
lowerCamelCase , data_dir=lowerCamelCase , type_path='''train''' , max_source_length=lowerCamelCase , max_target_length=lowerCamelCase , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase , )
snake_case__ : Tuple = DataLoader(lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase , lowerCamelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
snake_case__ : Any = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def lowercase__ ( self , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
snake_case__ : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
snake_case__ : str = max(len(tokenizer.encode(lowerCamelCase ) ) for a in ARTICLES )
snake_case__ : List[Any] = max(len(tokenizer.encode(lowerCamelCase ) ) for a in SUMMARIES )
snake_case__ : Optional[int] = 4
snake_case__ : List[str] = LegacySeqaSeqDataset(
lowerCamelCase , data_dir=lowerCamelCase , type_path='''train''' , max_source_length=20 , max_target_length=lowerCamelCase , )
snake_case__ : Dict = DataLoader(lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : int = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
snake_case__ : str = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
snake_case__ : Optional[int] = tmp_dir.joinpath('''train.source''' ).open().readlines()
snake_case__ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase , lowerCamelCase , 128 , lowerCamelCase )
snake_case__ : List[str] = {x.name for x in tmp_dir.iterdir()}
snake_case__ : Optional[int] = {x.name for x in save_dir.iterdir()}
snake_case__ : Optional[Any] = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase ) < len(lowerCamelCase )
assert len(lowerCamelCase ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def lowercase__ ( self ) -> int:
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
snake_case__ ,snake_case__ ,snake_case__ : Optional[Any] = self._get_dataset(max_len=64 )
snake_case__ : Union[str, Any] = 64
snake_case__ : Tuple = ds.make_dynamic_sampler(lowerCamelCase , required_batch_size_multiple=lowerCamelCase )
snake_case__ : List[Any] = [len(lowerCamelCase ) for x in batch_sampler]
assert len(set(lowerCamelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowerCamelCase ) == len(lowerCamelCase ) # no dropped or added examples
snake_case__ : Optional[int] = DataLoader(lowerCamelCase , batch_sampler=lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 )
snake_case__ : Optional[int] = []
snake_case__ : List[Any] = []
for batch in data_loader:
snake_case__ : Dict = batch['''input_ids'''].shape
snake_case__ : List[Any] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
snake_case__ : Union[str, Any] = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(lowerCamelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowerCamelCase )
assert num_src_per_batch[0] == max(lowerCamelCase )
if failures:
raise AssertionError(f'''too many tokens in {len(lowerCamelCase )} batches''' )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ ,snake_case__ ,snake_case__ : int = self._get_dataset(max_len=512 )
snake_case__ : Union[str, Any] = 2
snake_case__ : str = ds.make_sortish_sampler(lowerCamelCase , shuffle=lowerCamelCase )
snake_case__ : Any = DataLoader(lowerCamelCase , batch_size=lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 )
snake_case__ : str = DataLoader(lowerCamelCase , batch_size=lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase )
snake_case__ : Union[str, Any] = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase , lowerCamelCase="input_ids" ):
return [batch[k].eq(lowerCamelCase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowerCamelCase , k='''labels''' ) ) < sum(count_pad_tokens(lowerCamelCase , k='''labels''' ) )
assert sum(count_pad_tokens(lowerCamelCase ) ) < sum(count_pad_tokens(lowerCamelCase ) )
assert len(lowerCamelCase ) == len(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase=1000 , lowerCamelCase=128 ) -> Union[str, Any]:
"""simple docstring"""
if os.getenv('''USE_REAL_DATA''' , lowerCamelCase ):
snake_case__ : str = '''examples/seq2seq/wmt_en_ro'''
snake_case__ : Optional[Any] = max_len * 2 * 64
if not Path(lowerCamelCase ).joinpath('''train.len''' ).exists():
save_len_file(lowerCamelCase , lowerCamelCase )
else:
snake_case__ : int = '''examples/seq2seq/test_data/wmt_en_ro'''
snake_case__ : str = max_len * 4
save_len_file(lowerCamelCase , lowerCamelCase )
snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase )
snake_case__ : Tuple = SeqaSeqDataset(
lowerCamelCase , data_dir=lowerCamelCase , type_path='''train''' , max_source_length=lowerCamelCase , max_target_length=lowerCamelCase , n_obs=lowerCamelCase , )
return ds, max_tokens, tokenizer
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ ,snake_case__ ,snake_case__ : List[str] = self._get_dataset()
snake_case__ : str = set(DistributedSortishSampler(lowerCamelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase ) )
snake_case__ : int = set(DistributedSortishSampler(lowerCamelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase ) )
assert idsa.intersection(lowerCamelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowercase__ ( self , lowerCamelCase ) -> Any:
"""simple docstring"""
snake_case__ : str = AutoTokenizer.from_pretrained(lowerCamelCase , use_fast=lowerCamelCase )
if tok_name == MBART_TINY:
snake_case__ : List[Any] = SeqaSeqDataset(
lowerCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
snake_case__ : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
snake_case__ : str = SeqaSeqDataset(
lowerCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
snake_case__ : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowerCamelCase ) == 1 if tok_name == BART_TINY else len(lowerCamelCase ) == 0
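# --- Hedged sketch (illustrative addition): the dynamic-sampler test above
# groups examples so each padded batch stays under a token budget. A minimal
# stand-alone version of that idea, operating on per-example lengths:
def _token_budget_batches(lengths, max_tokens):
    """Greedily pack indices (longest first) into batches whose padded size
    (batch_size * longest_example) stays at or under max_tokens."""
    order = sorted(range(len(lengths)), key=lambda i: -lengths[i])
    batches, current = [], []
    for idx in order:
        longest = max(lengths[i] for i in current + [idx])
        if current and longest * (len(current) + 1) > max_tokens:
            batches.append(current)
            current = []
        current.append(idx)
    if current:
        batches.append(current)
    return batches


# e.g. _token_budget_batches([5, 9, 3, 7], max_tokens=16) -> [[1], [3, 0], [2]]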
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
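# --- Hedged sketch (illustrative addition): the tokenizer above layers a
# reduced fairseq-style vocab over the full SentencePiece model, falling back
# to <unk> for anything outside it. The core mapping, isolated:
def _reduced_vocab_demo() -> None:
    specials = ["<s>", "<pad>", "</s>", "<unk>"]
    monolingual = ["▁xin", "▁chào"]  # stand-in for dict.txt entries
    token_to_id = {tok: i for i, tok in enumerate(specials + monolingual)}
    id_to_token = {i: tok for tok, i in token_to_id.items()}
    unk_id = token_to_id["<unk>"]

    def convert(token: str) -> int:
        return token_to_id.get(token, unk_id)  # out-of-vocab -> <unk>

    assert convert("▁xin") == 4
    assert convert("▁missing") == unk_id
    assert id_to_token[convert("▁chào")] == "▁chào"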
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ) -> List[str]:
"""simple docstring"""
snake_case__ : int = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class in get_values(lowerCamelCase ):
snake_case__ : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = parent
snake_case__ : Tuple = batch_size
snake_case__ : str = seq_length
snake_case__ : List[Any] = is_training
snake_case__ : List[Any] = use_input_mask
snake_case__ : Optional[Any] = use_token_type_ids
snake_case__ : int = use_labels
snake_case__ : Any = vocab_size
snake_case__ : Dict = hidden_size
snake_case__ : str = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : str = intermediate_size
snake_case__ : Dict = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : int = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : Dict = type_sequence_label_size
snake_case__ : int = initializer_range
snake_case__ : str = num_labels
snake_case__ : int = num_choices
snake_case__ : int = scope
snake_case__ : int = embedding_size
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : List[str] = None
if self.use_input_mask:
snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = None
if self.use_token_type_ids:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Optional[Any] = None
snake_case__ : Dict = None
snake_case__ : int = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : str = TFMobileBertModel(config=lowerCamelCase )
snake_case__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case__ : Tuple = model(lowerCamelCase )
snake_case__ : Union[str, Any] = [input_ids, input_mask]
snake_case__ : Optional[Any] = model(lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = TFMobileBertForMaskedLM(config=lowerCamelCase )
snake_case__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case__ : Optional[Any] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Optional[Any] = TFMobileBertForNextSentencePrediction(config=lowerCamelCase )
snake_case__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case__ : Union[str, Any] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = TFMobileBertForPreTraining(config=lowerCamelCase )
snake_case__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case__ : List[Any] = model(lowerCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Any = self.num_labels
snake_case__ : Optional[int] = TFMobileBertForSequenceClassification(config=lowerCamelCase )
snake_case__ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case__ : Optional[int] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = self.num_choices
snake_case__ : Any = TFMobileBertForMultipleChoice(config=lowerCamelCase )
snake_case__ : str = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Optional[Any] = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Optional[int] = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Any = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
snake_case__ : List[Any] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = self.num_labels
snake_case__ : Tuple = TFMobileBertForTokenClassification(config=lowerCamelCase )
snake_case__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case__ : List[str] = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : List[str] = TFMobileBertForQuestionAnswering(config=lowerCamelCase )
snake_case__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case__ : str = model(lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : int = self.prepare_config_and_inputs()
        snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : Union[str, Any] = config_and_inputs
snake_case__ : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
snake_case__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase )
@slow
def lowercase__ ( self ) -> int:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
snake_case__ : int = TFMobileBertModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
snake_case__ : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ : Any = model(lowerCamelCase )[0]
snake_case__ : List[Any] = [1, 6, 30522]
self.assertEqual(output.shape , lowerCamelCase )
snake_case__ : str = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase , atol=1E-4 )
| 694 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowerCAmelCase : Optional[int] = parser.parse_args()
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model"
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase : Optional[Any] = pipe.to(device)
# to channels last
_lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
_lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last)
_lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4)
_lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9
_lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8)
_lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowerCAmelCase : Tuple = 6_6_6
_lowerCAmelCase : str = torch.Generator(device).manual_seed(seed)
_lowerCAmelCase : Dict = {"generator": generator}
if args.steps is not None:
_lowerCAmelCase : Tuple = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class snake_case :
"""simple docstring"""
def __init__( self ) -> None:
"""simple docstring"""
snake_case__ : Optional[Any] = [2, 1, 2, -1]
snake_case__ : Tuple = [1, 2, 3, 4]
def lowercase__ ( self ) -> list[float]:
"""simple docstring"""
snake_case__ : Any = len(self.first_signal )
snake_case__ : List[str] = len(self.second_signal )
snake_case__ : List[str] = max(lowerCamelCase , lowerCamelCase )
# create a zero matrix of max_length x max_length
snake_case__ : Union[str, Any] = [[0] * max_length for i in range(lowerCamelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCamelCase ):
snake_case__ : Optional[Any] = deque(self.second_signal )
rotated_signal.rotate(lowerCamelCase )
for j, item in enumerate(lowerCamelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
snake_case__ : Optional[Any] = np.matmul(np.transpose(lowerCamelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCamelCase , 2 ) for i in final_signal]
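# --- Hedged cross-check (illustrative addition): circular convolution can also
# be computed in the frequency domain. For the default signals above this
# returns [10.0, 10.0, 6.0, 14.0].
def _fft_circular_convolution_demo() -> list:
    first, second = [2, 1, 2, -1], [1, 2, 3, 4]
    spectrum = np.fft.fft(first, n=4) * np.fft.fft(second, n=4)
    return [round(float(x), 2) for x in np.real(np.fft.ifft(spectrum))]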
if __name__ == "__main__":
doctest.testmod()
| 694 |
'''simple docstring'''
import socket
def _A ( ):
snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case__ : str = socket.gethostname()
snake_case__ : Union[str, Any] = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
snake_case__ : int = sock.recv(10_24 )
if not data:
break
out_file.write(snake_case__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
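# --- Hedged counterpart (illustrative addition): the client above expects a
# server on the same host/port that streams a file and closes. A minimal
# matching server sketch; the filename below is an assumption.
def demo_server(filename: str = "File_to_send") -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 1_23_12))
    server.listen(1)  # accept a single client
    conn, _addr = server.accept()
    print(conn.recv(10_24))  # the client's greeting
    with open(filename, "rb") as in_file:
        data = in_file.read(10_24)
        while data:
            conn.send(data)
            data = in_file.read(10_24)
    conn.close()
    server.close()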
if __name__ == "__main__":
main()
| 694 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'trocr'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=1024 , lowerCamelCase=12 , lowerCamelCase=16 , lowerCamelCase=4096 , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = vocab_size
snake_case__ : List[Any] = d_model
snake_case__ : Optional[int] = decoder_layers
snake_case__ : List[str] = decoder_attention_heads
snake_case__ : List[str] = decoder_ffn_dim
snake_case__ : Optional[int] = activation_function
snake_case__ : Any = max_position_embeddings
snake_case__ : List[Any] = dropout
snake_case__ : Any = attention_dropout
snake_case__ : Tuple = activation_dropout
snake_case__ : Optional[int] = init_std
snake_case__ : int = decoder_layerdrop
snake_case__ : Any = use_cache
snake_case__ : Optional[int] = scale_embedding
snake_case__ : Optional[Any] = use_learned_position_embeddings
snake_case__ : List[str] = layernorm_embedding
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , decoder_start_token_id=lowerCamelCase , **lowerCamelCase , )
| 694 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
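    # --- Hedged worked example (illustrative addition): $1000 at a 5% nominal
    # annual rate, written out with the same formulas as the functions above.
    # Daily simple interest over a year earns exactly principal * rate:
    demo_simple = 1_000.0 * (0.05 / 3_65) * 3_65  # = 50.0
    # Daily compounding earns slightly more:
    demo_compound = 1_000.0 * ((1 + 0.05 / 3_65) ** 3_65 - 1)  # ~ 51.27
    assert round(demo_simple, 2) == 50.0 and demo_compound > demo_simple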
| 694 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ) -> Any:
"""simple docstring"""
snake_case__ : Any = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
snake_case__ : List[str] = parent
snake_case__ : str = batch_size
snake_case__ : str = num_channels
snake_case__ : Tuple = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : List[Any] = do_resize
snake_case__ : int = size
snake_case__ : List[str] = do_normalize
snake_case__ : int = image_mean
snake_case__ : Tuple = image_std
snake_case__ : Any = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : List[Any] = do_pad
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=False ) -> List[str]:
"""simple docstring"""
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
snake_case__ ,snake_case__ : Dict = image.size
else:
snake_case__ ,snake_case__ : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
snake_case__ : Dict = self.size['''shortest_edge''']
elif w > h:
snake_case__ : str = self.size['''shortest_edge''']
snake_case__ : Optional[int] = int(self.size['''shortest_edge'''] * w / h )
else:
snake_case__ : Optional[int] = self.size['''shortest_edge''']
snake_case__ : Union[str, Any] = self.size['''shortest_edge''']
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__ ,snake_case__ : Union[str, Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Tuple = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
snake_case__ : str = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = YolosImageProcessor if is_vision_available() else None
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Any = YolosImageProcessingTester(self )
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''size''' ) )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
snake_case__ : List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
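
# Editorial usage sketch (runnable as a script and inert under pytest; assumes the
# "hustvl/yolos-small" checkpoint already used by the slow tests above is reachable):
if __name__ == "__main__":
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
    inputs = image_processor(images=image, return_tensors="pt")
    # The 640x480 fixture is resized so its shorter side becomes 800, giving the
    # 1 x 3 x 800 x 1066 tensor asserted in the tests above.
    print(inputs["pixel_values"].shape)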
| 694 |
'''simple docstring'''
from math import isqrt


def is_prime(number: int) -> bool:
    """Check primality by trial division up to isqrt(number); assumes number >= 2."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
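
# Editorial self-check (an addition, not part of the original solution): the
# candidates generated above are exactly the gaps between consecutive cubes,
# since (n + 1) ** 3 - n ** 3 == 3 * n ** 2 + 3 * n + 1, giving 7, 19, 37, ...
assert all((n + 1) ** 3 - n**3 == 3 * n**2 + 3 * n + 1 for n in range(1, 100))
assert [(n + 1) ** 3 - n**3 for n in (1, 2, 3)] == [7, 19, 37]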
| 694 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Re-split pieces like "2014," so the trailing comma becomes its own token.
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # XLNet-style layout: `A <sep> <cls>` or `A <sep> B <sep> <cls>`.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
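
    # Editorial example: for token_ids_0 = [10, 11] and token_ids_1 = [20], the three
    # methods above produce ids [10, 11, sep, 20, sep, cls], token type ids
    # [0, 0, 0, 1, 1, 2] (the trailing <cls> gets its own segment id 2), and a
    # special-tokens mask of [0, 0, 1, 0, 1, 1].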

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        # Undo the Jieba-stage substitutions: "\u2582" encoded a space, "\u2583" a newline.
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
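
# Editorial usage sketch (runnable as a script; assumes the "TsinghuaAI/CPM-Generate"
# checkpoint registered above is reachable and that jieba is installed):
if __name__ == "__main__":
    tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
    ids = tokenizer.encode("Hugging Face")
    # _decode strips sentencepiece spaces and restores "\u2582"/"\u2583" back to
    # spaces and newlines, so round-tripping recovers readable text.
    print(tokenizer.decode(ids))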
| 694 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 694 | 1 |