def split(string: str, separator: str = " ") -> list:
    """Split ``string`` into the values separated by ``separator``."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
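A quick hand-checked usage example (added for illustration, not part of the original module):

print(split("apple#banana#cherry", "#"))  # ['apple', 'banana', 'cherry']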
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even-valued Fibonacci terms not exceeding ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
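As a hand-checked sanity case: the Fibonacci terms not exceeding 10 are 1, 1, 2, 3, 5, 8, and the even ones sum to 10.

assert solution(10) == 10  # 2 + 8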
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
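A minimal usage sketch of power_iteration on a matrix whose dominant eigenvalue is known in advance; the matrix and starting vector here are illustrative choices, not from the original module.

import numpy as np

A = np.array([[2.0, 0.0], [0.0, 1.0]])  # dominant eigenvalue is 2.0
eigen_value, eigen_vector = power_iteration(A, np.array([1.0, 1.0]))
assert abs(eigen_value - 2.0) < 1e-6  # the iteration converges to the largest eigenvalue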
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
from __future__ import annotations

import unittest

from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TFMobileBertForMaskedLM,
        TFMobileBertForMultipleChoice,
        TFMobileBertForNextSentencePrediction,
        TFMobileBertForPreTraining,
        TFMobileBertForQuestionAnswering,
        TFMobileBertForSequenceClassification,
        TFMobileBertForTokenClassification,
        TFMobileBertModel,
    )


@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(inputs)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event contemplated by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e., length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A float within the support of the distribution, used e.g. when padding data series."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the parameter projection layer that maps the input to the appropriate parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive orthant via the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # We cannot scale the negative binomial with an affine transform, since it must
    # return integers. Instead we scale the parameters.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator below ``digit`` whose unit fraction has the
    longest recurring cycle in its decimal expansion."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
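A small hand-checkable case (added for illustration): among unit fractions with denominators up to 10, 1/7 = 0.(142857) has the longest recurring cycle.

assert solution(1, 10) == 7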
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
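The closed form computed above is the central binomial coefficient C(2n, n); two hand-checked values:

assert solution(1) == 2  # C(2, 1): two lattice paths through a 1x1 grid
assert solution(2) == 6  # C(4, 2): six lattice paths through a 2x2 grid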
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
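A hypothetical usage sketch: the first call downloads the checkpoint weights, so it is left commented out here, and the sample text is invented.

# summarizer = TextSummarizationTool()
# print(summarizer("Long English text to condense into a few sentences..."))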
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
import argparse
import os
from pathlib import Path

import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download

from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)


new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]

    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])

            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model


def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]

    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)

        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
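A usage sketch, assuming the class above is the GLPN image processor; the file name is hypothetical, and both output dimensions are rounded down to multiples of size_divisor.

# from PIL import Image
# processor = GLPNImageProcessor()
# batch = processor(images=Image.open("photo.jpg"), return_tensors="pt")
# batch["pixel_values"].shape  # (1, 3, H', W') with H' and W' multiples of 32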
# Numbers of alphabet which we call base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text``, using rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
_lowerCAmelCase : List[str] = (7_2_0, 1_2_8_0) # Height, Width
_lowerCAmelCase : List[Any] = (0.4, 0.6) # if height or width lower than this scale, drop it.
_lowerCAmelCase : List[Any] = 1 / 1_0_0
_lowerCAmelCase : List[Any] = ""
_lowerCAmelCase : List[str] = ""
_lowerCAmelCase : int = ""
_lowerCAmelCase : List[str] = 2_5_0
def _A ( ):
snake_case__ : List[str] = get_dataset(snake_case__ , snake_case__ )
for index in range(snake_case__ ):
snake_case__ : List[Any] = random.sample(range(len(snake_case__ ) ) , 4 )
snake_case__ : Optional[int] = update_image_and_anno(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , filter_scale=snake_case__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
snake_case__ : Tuple = random_chars(32 )
snake_case__ : str = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
snake_case__ : Dict = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
snake_case__ : Union[str, Any] = []
for anno in new_annos:
snake_case__ : Dict = anno[3] - anno[1]
snake_case__ : Union[str, Any] = anno[4] - anno[2]
snake_case__ : Dict = anno[1] + width / 2
snake_case__ : Optional[int] = anno[2] + height / 2
snake_case__ : Dict = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(snake_case__ )
with open(f'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _A ( snake_case__ : str , snake_case__ : str ):
snake_case__ : List[Any] = []
snake_case__ : Union[str, Any] = []
for label_file in glob.glob(os.path.join(snake_case__ , '''*.txt''' ) ):
snake_case__ : str = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(snake_case__ ) as in_file:
snake_case__ : int = in_file.readlines()
snake_case__ : List[str] = os.path.join(snake_case__ , f'''{label_name}.jpg''' )
snake_case__ : int = []
for obj_list in obj_lists:
snake_case__ : int = obj_list.rstrip('''\n''' ).split(''' ''' )
snake_case__ : Any = float(obj[1] ) - float(obj[3] ) / 2
snake_case__ : List[Any] = float(obj[2] ) - float(obj[4] ) / 2
snake_case__ : Union[str, Any] = float(obj[1] ) + float(obj[3] ) / 2
snake_case__ : Any = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case__ )
labels.append(snake_case__ )
return img_paths, labels
def _A ( snake_case__ : list , snake_case__ : list , snake_case__ : list[int] , snake_case__ : tuple[int, int] , snake_case__ : tuple[float, float] , snake_case__ : float = 0.0 , ):
snake_case__ : Any = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
snake_case__ : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case__ : Any = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case__ : Any = int(scale_x * output_size[1] )
snake_case__ : Tuple = int(scale_y * output_size[0] )
snake_case__ : List[str] = []
snake_case__ : List[str] = []
for i, index in enumerate(snake_case__ ):
snake_case__ : Any = all_img_list[index]
path_list.append(snake_case__ )
snake_case__ : Tuple = all_annos[index]
snake_case__ : Optional[int] = cva.imread(snake_case__ )
if i == 0: # top-left
snake_case__ : Dict = cva.resize(snake_case__ , (divid_point_x, divid_point_y) )
snake_case__ : Any = img
for bbox in img_annos:
snake_case__ : Tuple = bbox[1] * scale_x
snake_case__ : Union[str, Any] = bbox[2] * scale_y
snake_case__ : int = bbox[3] * scale_x
snake_case__ : List[str] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
snake_case__ : Tuple = cva.resize(snake_case__ , (output_size[1] - divid_point_x, divid_point_y) )
snake_case__ : Optional[int] = img
for bbox in img_annos:
snake_case__ : List[Any] = scale_x + bbox[1] * (1 - scale_x)
snake_case__ : Dict = bbox[2] * scale_y
snake_case__ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
snake_case__ : int = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
snake_case__ : Tuple = cva.resize(snake_case__ , (divid_point_x, output_size[0] - divid_point_y) )
snake_case__ : Union[str, Any] = img
for bbox in img_annos:
snake_case__ : List[Any] = bbox[1] * scale_x
snake_case__ : Dict = scale_y + bbox[2] * (1 - scale_y)
snake_case__ : int = bbox[3] * scale_x
snake_case__ : Tuple = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
snake_case__ : Any = cva.resize(
snake_case__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
snake_case__ : Tuple = img
for bbox in img_annos:
snake_case__ : Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
snake_case__ : str = scale_y + bbox[2] * (1 - scale_y)
snake_case__ : List[str] = scale_x + bbox[3] * (1 - scale_x)
snake_case__ : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than filter_scale
if filter_scale > 0:
snake_case__ : List[str] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
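# Illustrative usage sketch (paths and sizes are assumptions; both helpers
# are named `_A` in this dump, upstream they are `get_dataset` and
# `update_image_and_anno`): four samples are tiled onto one canvas split at
# a random point, and every box is rescaled into the combined frame.
#
#     img_paths, annos = get_dataset("/data/labels", "/data/images")
#     idxs = random.sample(range(len(annos)), 4)
#     new_img, new_annos, first_path = update_image_and_anno(
#         img_paths, annos, idxs, (720, 1280), (0.3, 0.7), filter_scale=0.05
#     )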
def _A ( snake_case__ : int ):
assert number_char > 1, "The number of characters should be greater than 1"
snake_case__ : Dict = ascii_lowercase + digits
return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
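# Worked examples (illustrative values, positional arguments): with power
# unknown, P = V * I, so _A(2, 2, 0) -> result('power', 4.0); with voltage
# unknown, V = P / I, so _A(0, 2, 4) -> result('voltage', 2.0); with current
# unknown, I = P / V, so _A(2, 0, 4) -> result('current', 2.0).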
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 709 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , _lowerCAmelCase )
def _A ( snake_case__ : List[str] , snake_case__ : Dict ):
snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' )
with open(snake_case__ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case__ )
snake_case__ : int = get_imports(snake_case__ )
assert parsed_imports == ["os"]
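# Hedged note: the single assertion works for every template above because
# `get_imports` collects plain imports (top-level or inside functions) but
# skips anything imported under a try/except, treating it as optional; only
# the unconditional `os` import survives in each case.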
| 694 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowerCAmelCase : List[str] = "examples/"
_lowerCAmelCase : str = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
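# Worked example (illustrative input): with the "init" pattern and target
# version "4.31.0", the line
#     __version__ = "4.31.0.dev0"
# matches and is rewritten to
#     __version__ = "4.31.0"
# after VERSION is substituted into the replacement template.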
_lowerCAmelCase : Union[str, Any] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
_lowerCAmelCase : Dict = "README.md"
def _A ( snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str ):
with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case__ : Any = f.read()
snake_case__ : int = REPLACE_PATTERNS[pattern]
snake_case__ : str = replace.replace('''VERSION''' , snake_case__ )
snake_case__ : Any = re_pattern.sub(snake_case__ , snake_case__ )
with open(snake_case__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(snake_case__ )
def _A ( snake_case__ : Any ):
for folder, directories, fnames in os.walk(snake_case__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(snake_case__ , snake_case__ ) , snake_case__ , pattern='''examples''' )
def _A ( snake_case__ : List[Any] , snake_case__ : Dict=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(snake_case__ , snake_case__ , snake_case__ )
if not patch:
update_version_in_examples(snake_case__ )
def _A ( ):
snake_case__ : List[Any] = '''🤗 Transformers currently provides the following architectures'''
snake_case__ : Optional[Any] = '''1. Want to contribute a new model?'''
with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case__ : Optional[Any] = f.readlines()
# Find the start of the list.
snake_case__ : Union[str, Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case__ : List[str] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
snake_case__ : List[str] = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(snake_case__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(snake_case__ )
def _A ( ):
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
snake_case__ : Dict = f.read()
snake_case__ : Any = REPLACE_PATTERNS['''init'''][0].search(snake_case__ ).groups()[0]
return packaging.version.parse(snake_case__ )
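# Worked example of the parse above: packaging.version.parse("4.31.0.dev0")
# gives is_devrelease == True and base_version == "4.31.0", which is what
# the release helpers below rely on.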
def _A ( snake_case__ : List[str]=False ):
snake_case__ : str = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
snake_case__ : Dict = default_version.base_version
elif patch:
snake_case__ : Optional[Any] = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
snake_case__ : Union[str, Any] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
snake_case__ : Union[str, Any] = input(f'''Which version are you releasing? [{default_version}]''' )
if len(snake_case__ ) == 0:
snake_case__ : Tuple = default_version
print(f'''Updating version to {version}.''' )
global_version_update(snake_case__ , patch=snake_case__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def _A ( ):
snake_case__ : Optional[Any] = get_version()
snake_case__ : int = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
snake_case__ : Any = current_version.base_version
# Check with the user we got that right.
snake_case__ : int = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(snake_case__ ) == 0:
snake_case__ : List[Any] = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(snake_case__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
_lowerCAmelCase : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 710 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'markuplm'
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Optional[int] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : List[str] = initializer_range
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Optional[Any] = position_embedding_type
snake_case__ : Dict = use_cache
snake_case__ : int = classifier_dropout
# additional properties
snake_case__ : Union[str, Any] = max_depth
snake_case__ : Dict = max_xpath_tag_unit_embeddings
snake_case__ : Any = max_xpath_subs_unit_embeddings
snake_case__ : int = tag_pad_id
snake_case__ : Tuple = subs_pad_id
snake_case__ : Dict = xpath_unit_hidden_size
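# Illustrative usage sketch (`snake_case` is a dump artifact; upstream the
# class is `MarkupLMConfig`, registered under model_type "markuplm"):
#
#     cfg = MarkupLMConfig()  # all defaults, e.g. vocab_size=30522, hidden_size=768
#     cfg.to_dict()["model_type"]  # -> "markuplm"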
| 694 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _A ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case__ , snake_case__ ) ) )
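# Worked example: the Euclidean distance between (0, 0) and (3, 4) is
# sqrt(3**2 + 4**2) = 5.0, i.e. _A(np.array([0, 0]), np.array([3, 4])) -> 5.0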
def _A ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ):
if dataset.ndim != value_array.ndim:
snake_case__ : Any = (
'''Wrong input data\'s dimensions... '''
f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(snake_case__ )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case__ : Optional[int] = (
'''Wrong input data\'s shape... '''
f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(snake_case__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
snake_case__ : int = (
'''Input data have different datatypes... '''
f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(snake_case__ )
snake_case__ : Dict = []
for value in value_array:
snake_case__ : str = euclidean(snake_case__ , dataset[0] )
snake_case__ : Tuple = dataset[0].tolist()
for dataset_value in dataset[1:]:
snake_case__ : Dict = euclidean(snake_case__ , snake_case__ )
if dist > temp_dist:
snake_case__ : Union[str, Any] = temp_dist
snake_case__ : Dict = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _A ( snake_case__ : np.ndarray , snake_case__ : np.ndarray ):
return np.dot(snake_case__ , snake_case__ ) / (norm(snake_case__ ) * norm(snake_case__ ))
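# Worked examples for the cosine similarity just above (upstream name
# `cosine_similarity`; in this dump it is the last `_A`): orthogonal vectors
# give 0.0 and collinear vectors give 1.0, e.g.
#     cosine_similarity(np.array([1, 0]), np.array([0, 1]))  # -> 0.0
#     cosine_similarity(np.array([1, 2]), np.array([2, 4]))  # -> 1.0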
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
def _A ( snake_case__ : float ):
return 10 - x * x
def _A ( snake_case__ : float , snake_case__ : float ):
# Use Bolzano's theorem to check whether there is a root between a and b
if equation(snake_case__ ) * equation(snake_case__ ) >= 0:
raise ValueError('''Wrong space!''' )
snake_case__ : List[str] = a
while (b - a) >= 0.01:
# Find middle point
snake_case__ : Optional[int] = (a + b) / 2
# Check if middle point is root
if equation(snake_case__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case__ ) * equation(snake_case__ ) < 0:
snake_case__ : Dict = c
else:
snake_case__ : List[str] = c
return c
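# Worked check: the positive root of 10 - x**2 is sqrt(10) ~= 3.1623, so the
# calls below, bisection(-2, 5) and bisection(0, 6), both converge to about
# 3.16 once the bracketing interval shrinks below the 0.01 tolerance.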
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 694 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=4 , ) -> Any:
"""simple docstring"""
snake_case__ : Tuple = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : int = seq_length
snake_case__ : Optional[int] = is_training
snake_case__ : int = use_attention_mask
snake_case__ : int = use_token_type_ids
snake_case__ : Dict = use_labels
snake_case__ : Optional[int] = vocab_size
snake_case__ : List[str] = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : Any = max_position_embeddings
snake_case__ : Dict = type_vocab_size
snake_case__ : Optional[int] = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_choices
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Any = None
if self.use_attention_mask:
snake_case__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : str = None
if self.use_token_type_ids:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : str = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[int] = self.prepare_config_and_inputs()
snake_case__ : Optional[Any] = config_and_inputs
snake_case__ : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = self.prepare_config_and_inputs()
snake_case__ : Union[str, Any] = config_and_inputs
snake_case__ : List[str] = True
snake_case__ : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = True
_lowerCAmelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[str] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case__ : int = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowerCamelCase )
snake_case__ : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase )
@require_flax
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : int = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowerCamelCase )
snake_case__ : Any = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
snake_case__ : Optional[Any] = model(lowerCamelCase )[0]
snake_case__ : str = [1, 11, 50265]
self.assertEqual(list(output.shape ) , lowerCamelCase )
# compare the actual values for a slice.
snake_case__ : List[str] = np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowerCamelCase )
snake_case__ : List[Any] = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
snake_case__ : Union[str, Any] = model(lowerCamelCase )[0]
# compare the actual values for a slice.
snake_case__ : Tuple = np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
| 712 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : list[float] , snake_case__ : list[float] ):
snake_case__ : Dict = sorted(numsa + numsa )
snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
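# Worked example: for [1.0, 3.0] and [2.0] the merged sorted list is
# [1.0, 2.0, 3.0] (odd length), so the middle element 2.0 is returned; for
# [1.0] and [2.0] the even branch returns (1.0 + 2.0) / 2 = 1.5.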
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 694 | 0 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _A ( snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any]=True ):
model.train()
snake_case__ : Dict = model(snake_case__ )
snake_case__ : Dict = F.mse_loss(snake_case__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case__ )
def _A ( snake_case__ : str , snake_case__ : List[Any]=False ):
set_seed(42 )
snake_case__ : Dict = RegressionModel()
snake_case__ : Tuple = deepcopy(snake_case__ )
snake_case__ : Union[str, Any] = RegressionDataset(length=80 )
snake_case__ : Optional[Any] = DataLoader(snake_case__ , batch_size=16 )
model.to(accelerator.device )
if sched:
snake_case__ : Optional[int] = AdamW(params=model.parameters() , lr=1E-3 )
snake_case__ : List[str] = AdamW(params=ddp_model.parameters() , lr=1E-3 )
snake_case__ : Optional[Any] = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 )
snake_case__ : Dict = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 )
# Make a copy of `model`
if sched:
snake_case__ : int = accelerator.prepare(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
snake_case__ : int = accelerator.prepare(snake_case__ , snake_case__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _A ( snake_case__ : Optional[int] ):
# Test when on a single CPU or GPU that the context manager does nothing
snake_case__ : List[str] = get_training_setup(snake_case__ )
# Use a single batch
snake_case__ : List[str] = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targets for the base model
snake_case__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
snake_case__ : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
snake_case__ : Dict = ddp_input[torch.randperm(len(snake_case__ ) )]
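# Hedged aside: `accelerator.no_sync` mirrors torch's native DDP context
# manager. A minimal sketch of the same accumulation pattern, assuming
# `ddp_model` is a torch.nn.parallel.DistributedDataParallel instance:
#
#     with ddp_model.no_sync():   # skip the gradient all-reduce this step
#         loss_a.backward()       # gradients accumulate locally per rank
#     loss_b.backward()           # the next backward outside syncs grads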
def _A ( snake_case__ : Union[str, Any] ):
# Test on distributed setup that context manager behaves properly
snake_case__ : Any = get_training_setup(snake_case__ )
# Use a single batch
snake_case__ : List[str] = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targets for the base model
snake_case__ : List[str] = accelerator.gather((ddp_input, ddp_target) )
snake_case__ : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
snake_case__ : Optional[int] = ddp_input[torch.randperm(len(snake_case__ ) )]
def _A ( snake_case__ : Optional[Any]=False , snake_case__ : Dict=False ):
snake_case__ : Any = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case__ : Dict = get_training_setup(snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
snake_case__ : str = batch.values()
# Gather the distributed inputs and targets for the base model
snake_case__ : Tuple = accelerator.gather((ddp_input, ddp_target) )
snake_case__ : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
snake_case__ : int = ddp_input[torch.randperm(len(snake_case__ ) )]
GradientState._reset_state()
def _A ( snake_case__ : Optional[int]=False , snake_case__ : Tuple=False ):
snake_case__ : Dict = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case__ : str = get_training_setup(snake_case__ , snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
snake_case__ : Tuple = batch.values()
# Gather the distributed inputs and targets for the base model
snake_case__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
snake_case__ : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
snake_case__ : Optional[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def _A ( ):
snake_case__ : Tuple = Accelerator()
snake_case__ : Dict = RegressionDataset(length=80 )
snake_case__ : Tuple = DataLoader(snake_case__ , batch_size=16 )
snake_case__ : Tuple = RegressionDataset(length=96 )
snake_case__ : int = DataLoader(snake_case__ , batch_size=16 )
snake_case__ : Dict = accelerator.prepare(snake_case__ , snake_case__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if iteration < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if batch_num < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _A ( ):
snake_case__ : List[Any] = Accelerator()
snake_case__ : Union[str, Any] = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(snake_case__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(snake_case__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(snake_case__ , snake_case__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ )
def _A ( snake_case__ : Tuple ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 713 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowerCAmelCase : Any = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'encoder-decoder'
_lowerCAmelCase = True
def __init__( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case__ : List[str] = kwargs.pop('''encoder''' )
snake_case__ : Any = encoder_config.pop('''model_type''' )
snake_case__ : List[str] = kwargs.pop('''decoder''' )
snake_case__ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : str = True
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case__ : Optional[int] = True
snake_case__ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case__ : List[Any] = self.encoder.to_dict()
snake_case__ : str = self.decoder.to_dict()
snake_case__ : Any = self.__class__.model_type
return output
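# Illustrative usage (upstream API; the obfuscated method names above
# collide, so this sketch uses the public transformers names):
#
#     from transformers import BertConfig, EncoderDecoderConfig
#     cfg = EncoderDecoderConfig.from_encoder_decoder_configs(
#         BertConfig(), BertConfig()
#     )  # the decoder config gets is_decoder=True, add_cross_attention=True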
| 694 | 0 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 1
@register_to_config
def __init__( self , lowerCamelCase = 1000 , lowerCamelCase = None ) -> Dict:
"""simple docstring"""
self.set_timesteps(lowerCamelCase )
# standard deviation of the initial noise distribution
snake_case__ : List[str] = 1.0
# For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formulas (9), (12), (13) and Algorithm 2.
snake_case__ : Union[str, Any] = 4
# running values
snake_case__ : List[Any] = []
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Any:
"""simple docstring"""
snake_case__ : Tuple = num_inference_steps
snake_case__ : Any = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
snake_case__ : Optional[int] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
snake_case__ : Tuple = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
snake_case__ : Optional[int] = torch.sin(steps * math.pi / 2 ) ** 2
snake_case__ : Optional[int] = (1.0 - self.betas**2) ** 0.5
snake_case__ : List[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
snake_case__ : Optional[int] = timesteps.to(lowerCamelCase )
snake_case__ : Union[str, Any] = []
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
snake_case__ : Optional[int] = (self.timesteps == timestep).nonzero().item()
snake_case__ : Dict = timestep_index + 1
snake_case__ : str = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowerCamelCase )
if len(self.ets ) == 1:
snake_case__ : Any = self.ets[-1]
elif len(self.ets ) == 2:
snake_case__ : List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
snake_case__ : Tuple = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
snake_case__ : Union[str, Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
snake_case__ : Dict = self._get_prev_sample(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : str = self.alphas[timestep_index]
snake_case__ : List[str] = self.betas[timestep_index]
snake_case__ : Optional[Any] = self.alphas[prev_timestep_index]
snake_case__ : Optional[int] = self.betas[prev_timestep_index]
snake_case__ : Any = (sample - sigma * ets) / max(lowerCamelCase , 1E-8 )
snake_case__ : List[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> Any:
"""simple docstring"""
return self.config.num_train_timesteps
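# Note on the multistep update in the step method above: the `ets`
# combinations are the classical Adams-Bashforth coefficients of orders 1-4;
# the final branch is the 4-step rule
#     e = (55*e_t - 59*e_{t-1} + 37*e_{t-2} - 9*e_{t-3}) / 24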
| 715 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
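# Layout produced above (RoBERTa/BARTpho convention): a single sequence is
# encoded as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`, matching
# the `cls + A + sep + sep + B + sep` concatenation.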
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 694 | 0 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = CodeGenTokenizer
_lowerCAmelCase = CodeGenTokenizerFast
_lowerCAmelCase = True
_lowerCAmelCase = {'add_prefix_space': True}
_lowerCAmelCase = False
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
snake_case__ : Tuple = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
snake_case__ : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case__ : List[Any] = {'''unk_token''': '''<unk>'''}
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase ) )
def lowercase__ ( self , **lowerCamelCase ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowercase__ ( self , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Tuple = '''lower newer'''
snake_case__ : Union[str, Any] = '''lower newer'''
return input_text, output_text
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : int = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Any = '''lower newer'''
snake_case__ : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
snake_case__ : List[str] = tokenizer.tokenize(lowerCamelCase , add_prefix_space=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
snake_case__ : Dict = tokens + [tokenizer.unk_token]
snake_case__ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Tuple = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase )
snake_case__ : Union[str, Any] = '''lower newer'''
# Testing tokenization
snake_case__ : Optional[Any] = tokenizer.tokenize(lowerCamelCase , add_prefix_space=lowerCamelCase )
snake_case__ : str = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Testing conversion to ids without special tokens
snake_case__ : List[str] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
snake_case__ : List[str] = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Testing conversion to ids with special tokens
snake_case__ : List[Any] = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase )
snake_case__ : Any = tokenizer.encode(lowerCamelCase , add_prefix_space=lowerCamelCase )
snake_case__ : Any = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Testing the unknown token
snake_case__ : List[Any] = tokens + [rust_tokenizer.unk_token]
snake_case__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def lowercase__ ( self , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowercase__ ( self , lowerCamelCase=15 ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
# Simple input
snake_case__ : List[str] = '''This is a simple input'''
snake_case__ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case__ : Optional[Any] = ('''This is a simple input''', '''This is a pair''')
snake_case__ : int = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding='''max_length''' , )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
snake_case__ : List[str] = '''This is a simple input'''
snake_case__ : Union[str, Any] = ['''This is a simple input looooooooong''', '''This is a simple input''']
snake_case__ : str = ('''This is a simple input''', '''This is a pair''')
snake_case__ : Union[str, Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
snake_case__ : Any = tokenizer.pad_token_id
snake_case__ : Dict = tokenizer(lowerCamelCase , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
snake_case__ : List[str] = tokenizer(lowerCamelCase , padding=lowerCamelCase , truncate=lowerCamelCase , return_tensors='''np''' )
snake_case__ : int = tokenizer(*lowerCamelCase , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
snake_case__ : Optional[Any] = tokenizer(lowerCamelCase , padding=lowerCamelCase , truncate=lowerCamelCase , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : str = '''$$$'''
snake_case__ : int = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCamelCase , add_bos_token=lowerCamelCase )
snake_case__ : Optional[int] = '''This is a simple input'''
snake_case__ : int = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case__ : Dict = tokenizer.bos_token_id
snake_case__ : Tuple = tokenizer(lowerCamelCase )
snake_case__ : str = tokenizer(lowerCamelCase )
self.assertEqual(out_s.input_ids[0] , lowerCamelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case__ : Tuple = tokenizer.decode(out_s.input_ids )
snake_case__ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCamelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
snake_case__ : Dict = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
snake_case__ : str = '''\nif len_a > len_b: result = a\nelse: result = b'''
snake_case__ : Optional[int] = tokenizer.encode(lowerCamelCase )
snake_case__ : List[str] = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
snake_case__ : Any = tokenizer.decode(lowerCamelCase , truncate_before_pattern=lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
| 716 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 6_4, 6_4)
timestep = torch.rand(1) * 9_9_9
encoder_hidden_status = torch.randn(2, 7_7, 7_6_8)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 6_6_6
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('''Building PyTorch model from configuration: {}'''.format(str(config ) ) )
    model = RemBertModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
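
# Example invocation (illustrative; all three paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf_checkpoint \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin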
| 717 |
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_23_12

    sock.connect((host, port) )
    sock.send(B'''Hello server!''' )

    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(10_24 )
            if not data:
                break
            out_file.write(data )

    print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )


if __name__ == "__main__":
    main()
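

# For reference, a minimal counterpart server is sketched below (illustrative,
# not part of the original script): it listens on the same port and streams a
# file back to the first client that connects. ``File_to_send`` is a placeholder.
def example_server():
    server = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    server.bind((socket.gethostname() , 1_23_12) )
    server.listen(1 )
    conn, _addr = server.accept()
    print(conn.recv(10_24 ) )  # the client's greeting
    with open('''File_to_send''' , '''rb''' ) as in_file:
        while True:
            chunk = in_file.read(10_24 )
            if not chunk:
                break
            conn.send(chunk )
    conn.close()
    server.close()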
| 694 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''poolformer'''
    def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 128, 320, 512] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1E-5 , initializer_range=0.02 , **kwargs , ) -> Union[str, Any]:
        """simple docstring"""
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 2E-3
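

# A short usage sketch (illustrative, not part of the original module); with the
# package importable, the config and its ONNX spec can be exercised like this:
#
#     from transformers import PoolFormerConfig
#     config = PoolFormerConfig()                 # defaults as defined above
#     onnx_config = PoolFormerOnnxConfig(config)
#     dict(onnx_config.inputs)    # {'pixel_values': {0: 'batch', 1: 'num_channels', ...}}
#     onnx_config.atol_for_validation             # 0.002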
| 718 |
'''simple docstring'''
from __future__ import annotations
def simple_interest( principal: float , daily_interest_rate: float , days_between_payments: float ) -> float:
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments
def compound_interest( principal: float , nominal_annual_interest_rate_percentage: float , number_of_compounding_periods: float , ) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )

    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest( principal: float , nominal_annual_percentage_rate: float , number_of_years: float , ) -> float:
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )

    return compound_interest(
        principal , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
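    # Illustrative spot checks (values are not from the original module):
    # a 500 principal at a 0.05 daily rate over 3 days earns 500 * 0.05 * 3 = 75.
    print(simple_interest(500.0 , 0.05 , 3 ) )  # 75.0
    # 10_000 compounded at 5% per period for 3 periods earns 10_000 * (1.05**3 - 1).
    print(compound_interest(10_000.0 , 0.05 , 3 ) )  # ~1576.25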
| 694 | 0 |
'''simple docstring'''
def naive_pattern_search( s: str , pattern: str ) -> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 719 |
'''simple docstring'''
from math import isqrt
def is_prime( number: int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )


def solution( max_prime: int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
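
# Note: the candidates 7, 19, 37, ... are the differences of consecutive cubes,
# (n + 1) ** 3 - n ** 3 = 3 * n * n + 3 * n + 1, which is why each iteration
# grows the candidate by 6 * cube_index.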
| 694 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1( datasets.Metric ):
    """simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        """simple docstring"""
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 694 | 0 |
'''simple docstring'''
class FlowNetwork:
    """simple docstring"""
    def __init__( self , graph , sources , sinks ):
        """simple docstring"""
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources , sinks )
        self.verticies_count = len(graph )
        self.maximum_flow_algorithm = None

    def _normalize_graph( self , sources , sinks ):
        """simple docstring"""
        if isinstance(sources , int ):
            sources = [sources]
        if isinstance(sinks , int ):
            sinks = [sinks]

        if len(sources ) == 0 or len(sinks ) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )

            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self ):
        """simple docstring"""
        if self.maximum_flow_algorithm is None:
            raise Exception('''You need to set maximum flow algorithm before.''' )
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm( self , algorithm ):
        """simple docstring"""
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor:
    """simple docstring"""
    def __init__( self , flow_network ):
        """simple docstring"""
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute( self ):
        """simple docstring"""
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm( self ):
        """simple docstring"""
        pass
class MaximumFlowAlgorithmExecutor( FlowNetworkAlgorithmExecutor ):
    """simple docstring"""
    def __init__( self , flow_network ):
        """simple docstring"""
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow( self ):
        """simple docstring"""
        if not self.executed:
            raise Exception('''You should execute algorithm before using its result!''' )
        return self.maximum_flow
class PushRelabelExecutor( MaximumFlowAlgorithmExecutor ):
    """simple docstring"""
    def __init__( self , flow_network ):
        """simple docstring"""
        super().__init__(flow_network )

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm( self ):
        """simple docstring"""
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index] )

    def process_vertex( self , vertex_index ):
        """simple docstring"""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index , neighbour_index )

            self.relabel(vertex_index )

    def push( self , from_index , to_index ):
        """simple docstring"""
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel( self , vertex_index ):
        """simple docstring"""
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(F'''maximum flow is {maximum_flow}''')
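
    # For this graph the only path from source 0 to sink 3 is 0 -> 1 -> 2 -> 3
    # with capacities 7, 6 and 8, so the printed maximum flow should be 6.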
| 721 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput( BaseOutput ):
    """simple docstring"""
    sample: torch.FloatTensor
class UNetaDModel( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , sample_size = 65536 , sample_rate = None , in_channels = 2 , out_channels = 2 , extra_in_channels = 0 , time_embedding_type = "fourier" , flip_sin_to_cos = True , use_timestep_embedding = False , freq_shift = 0.0 , down_block_types = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type = "UNetMidBlock1D" , out_block_type = None , block_out_channels = (32, 32, 64) , act_fn = None , norm_num_groups = 8 , layers_per_block = 1 , downsample_each_block = False , ):
        """simple docstring"""
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )

        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels ) - 1

            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )

        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels ) - 1

            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample , timestep , return_dict = True , ) -> Union[UNetaDOutput, Tuple]:
        """simple docstring"""
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )

        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=sample )
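

# A short usage sketch (illustrative, not part of the original module). Through
# the public API this model is `diffusers.UNet1DModel`; assuming the defaults
# above compose into a valid network, one denoising step looks like:
#
#     import torch
#     from diffusers import UNet1DModel
#
#     model = UNet1DModel()                    # defaults as defined above
#     sample = torch.randn(1, 2, model.config.sample_size)
#     out = model(sample, timestep=10).sample  # intended to match `sample`'s shape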
| 694 | 0 |
'''simple docstring'''
def set_bit( number: int , position: int ) -> int:
    return number | (1 << position)


def clear_bit( number: int , position: int ) -> int:
    return number & ~(1 << position)


def flip_bit( number: int , position: int ) -> int:
    return number ^ (1 << position)


def is_bit_set( number: int , position: int ) -> bool:
    return ((number >> position) & 1) == 1


def get_bit( number: int , position: int ) -> int:
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
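    # Illustrative checks (0b1101 == 13):
    assert set_bit(0b1101 , 1 ) == 0b1111  # 15
    assert clear_bit(0b1101 , 2 ) == 0b1001  # 9
    assert flip_bit(0b1101 , 1 ) == 0b1111  # 15
    assert is_bit_set(0b1101 , 3 ) is True
    assert get_bit(0b1101 , 1 ) == 0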
| 700 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path: str , pytorch_dump_folder_path: str ):
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , '''models''' )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )

    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )

    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(bort_vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )

    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='''pt''' )
    output_hf = hf_bort_model(**input_ids )[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )

    if success:
        print('''✔️ Both model do output the same tensors''' )
    else:
        print('''❌ Both model do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
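
# Example invocation (illustrative; both paths are placeholders):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path /path/to/output_dir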
| 694 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        """simple docstring"""
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        """simple docstring"""
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : int = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
snake_case__ : Optional[int] = feature_extractor(lowerCamelCase , padding=lowerCamelCase , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
snake_case__ : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
snake_case__ : Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
snake_case__ : str = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features
snake_case__ : str = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
snake_case__ : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
snake_case__ : Tuple = np.asarray(lowerCamelCase )
snake_case__ : Union[str, Any] = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features
snake_case__ : List[str] = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Union[str, Any] = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case__ : Dict = [None, 16, None]
for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
snake_case__ : Tuple = feature_extractor(
lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , return_attention_mask=lowerCamelCase )
snake_case__ : Any = inputs.input_features
snake_case__ : int = inputs.attention_mask
snake_case__ : Optional[int] = [np.sum(lowerCamelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : str = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case__ : int = [None, 16, None]
for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
snake_case__ : List[str] = feature_extractor(
lowerCamelCase , max_length=lowerCamelCase , padding=lowerCamelCase , return_tensors='''np''' , return_attention_mask=lowerCamelCase )
snake_case__ : List[Any] = inputs.input_features
snake_case__ : List[str] = inputs.attention_mask
snake_case__ : Dict = [np.sum(lowerCamelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Optional[int] = feature_extractor(
lowerCamelCase , padding='''max_length''' , max_length=4 , truncation=lowerCamelCase , return_tensors='''np''' , return_attention_mask=lowerCamelCase , )
snake_case__ : Union[str, Any] = inputs.input_features
snake_case__ : Optional[int] = inputs.attention_mask
snake_case__ : Any = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Optional[Any] = feature_extractor(
lowerCamelCase , padding='''longest''' , max_length=4 , truncation=lowerCamelCase , return_tensors='''np''' , return_attention_mask=lowerCamelCase , )
snake_case__ : int = inputs.input_features
snake_case__ : Dict = inputs.attention_mask
snake_case__ : str = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Tuple = feature_extractor(
lowerCamelCase , padding='''longest''' , max_length=16 , truncation=lowerCamelCase , return_tensors='''np''' , return_attention_mask=lowerCamelCase , )
snake_case__ : Optional[int] = inputs.input_features
snake_case__ : List[Any] = inputs.attention_mask
snake_case__ : int = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
import torch
snake_case__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.floataa )
snake_case__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case__ : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
snake_case__ : Dict = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ):
        """simple docstring"""
        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']

        return [x["array"] for x in speech_samples]
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
snake_case__ : Dict = self._load_datasamples(1 )
snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : str = feature_extractor(lowerCamelCase , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
| 701 |
'''simple docstring'''
def solution( n: int = 4_00_00_00 ) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F'''{solution() = }''')
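    # Sanity check: the even Fibonacci numbers not exceeding 100 are 2, 8 and 34,
    # so the call below should print 44.
    print(F'''{solution(100) = }''')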
| 694 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCAmelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline( Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        """simple docstring"""
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params['''prompt'''] = prompt
        if generate_kwargs is not None:
            forward_kwargs['''generate_kwargs'''] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['''generate_kwargs'''] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            forward_kwargs['''generate_kwargs''']['''max_new_tokens'''] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        """simple docstring"""
        image = load_image(image )

        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['''input_ids'''] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        """simple docstring"""
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''] , list )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            model_inputs['''input_ids'''] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        #  parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        #  the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        #  in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = []
for output_ids in model_outputs:
snake_case__ : Dict = {
'''generated_text''': self.tokenizer.decode(
lowerCamelCase , skip_special_tokens=lowerCamelCase , )
}
records.append(lowerCamelCase )
return records
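# Usage sketch: a minimal way to exercise an image-to-text pipeline like the one
# above through the high-level `pipeline` factory. The checkpoint name and the
# image path are illustrative assumptions, not values taken from this sample.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("./tests/fixtures/tests_samples/COCO/000000039769.png"))
# -> a list like [{"generated_text": "..."}]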
| 702 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : int = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
_lowerCAmelCase : Optional[int] = {
"google/pegasus-xsum": 5_1_2,
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PegasusTokenizer
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError(
                    f'''additional_special_tokens should be of type list, but is'''
                    f''' {type(lowerCamelCase )}''' )
snake_case__ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case__ : List[Any] = additional_special_tokens_extended
else:
snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Union[str, Any] = vocab_file
snake_case__ : List[Any] = False if not self.vocab_file else True
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : int = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
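# Usage sketch: how the fast tokenizer above is typically exercised. The
# checkpoint name matches the vocab map above; the input sentence is an
# assumption made up for illustration.
from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tok("PEGASUS was pre-trained with gap-sentence generation.").input_ids
assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends </s>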
| 694 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
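# Sketch: the lazy-module pattern used above, reduced to its core. Outside
# TYPE_CHECKING the module is swapped for a _LazyModule, so torch-backed symbols
# are only imported on first attribute access. A minimal stand-in (names below
# are assumptions, independent of the transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing a submodule until one of its attributes is accessed."""

    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        try:
            module_name = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


lazy = LazyModule("demo", {"sqrt": "math"})
print(lazy.sqrt(9.0))  # math is imported only at this point -> 3.0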
| 703 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = 1.0 if scale is None else scale
snake_case__ : Dict = 0.0 if loc is None else loc
super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] )
@property
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
return self.variance.sqrt()
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = args_dim
snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] )
snake_case__ : Optional[int] = domain_map
def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]:
"""simple docstring"""
snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj]
return self.domain_map(*lowerCamelCase )
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Tuple = function
def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.function(lowerCamelCase , *lowerCamelCase )
class snake_case :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
def __init__( self , lowerCamelCase = 1 ) -> None:
"""simple docstring"""
snake_case__ : Optional[Any] = dim
snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*lowerCamelCase )
else:
return Independent(self.distribution_class(*lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution:
"""simple docstring"""
snake_case__ : List[Any] = self._base_distribution(lowerCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim )
@property
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return len(self.event_shape )
@property
def lowercase__ ( self ) -> float:
"""simple docstring"""
return 0.0
def lowercase__ ( self , lowerCamelCase ) -> nn.Module:
"""simple docstring"""
return ParameterProjection(
in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowercase__ ( self , *lowerCamelCase ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def lowercase__ ( lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
_lowerCAmelCase = StudentT
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"loc": 1, "scale": 1}
_lowerCAmelCase = Normal
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"total_count": 1, "logits": 1}
_lowerCAmelCase = NegativeBinomial
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowercase__ ( self , lowerCamelCase ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : str = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
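# Sketch: a quick empirical check of the affine-scaling identities the
# AffineTransformed properties above rely on, namely E[aX + b] = a*E[X] + b and
# Var(aX + b) = a**2 * Var(X). Values below are arbitrary assumptions.
import torch
from torch.distributions import Normal

base = Normal(loc=torch.tensor(0.0), scale=torch.tensor(1.0))
loc, scale = 2.0, 3.0
samples = loc + scale * base.sample((100_000,))
print(samples.mean().item())  # ~= base mean * scale + loc = 2.0
print(samples.var().item())   # ~= base variance * scale**2 = 9.0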
| 694 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCAmelCase : Optional[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCAmelCase : Dict = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCAmelCase : Tuple = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="auto" , lowerCamelCase=-1 , lowerCamelCase=0.9 , lowerCamelCase=5 , lowerCamelCase=500 , lowerCamelCase="gpt2-large" , lowerCamelCase=-1 , lowerCamelCase=1024 , lowerCamelCase=25 , lowerCamelCase=5 , lowerCamelCase=True , lowerCamelCase=25 , ) -> int:
"""simple docstring"""
snake_case__ : Dict = compute_mauve(
p_text=lowerCamelCase , q_text=lowerCamelCase , p_features=lowerCamelCase , q_features=lowerCamelCase , p_tokens=lowerCamelCase , q_tokens=lowerCamelCase , num_buckets=lowerCamelCase , pca_max_data=lowerCamelCase , kmeans_explained_var=lowerCamelCase , kmeans_num_redo=lowerCamelCase , kmeans_max_iter=lowerCamelCase , featurize_model_name=lowerCamelCase , device_id=lowerCamelCase , max_text_length=lowerCamelCase , divergence_curve_discretization_size=lowerCamelCase , mauve_scaling_factor=lowerCamelCase , verbose=lowerCamelCase , seed=lowerCamelCase , )
return out
| 704 |
'''simple docstring'''
from math import factorial
def _A ( snake_case__ : int = 20 ):
    snake_case__ : int = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
snake_case__ : Union[str, Any] = n // 2
return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
_lowerCAmelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=2 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=10 , lowerCamelCase=3 , lowerCamelCase=32 * 4 , lowerCamelCase=32 * 6 , lowerCamelCase=4 , lowerCamelCase=32 , ) -> List[str]:
"""simple docstring"""
snake_case__ : List[str] = parent
snake_case__ : str = batch_size
snake_case__ : int = is_training
snake_case__ : Optional[Any] = use_auxiliary_loss
snake_case__ : int = num_queries
snake_case__ : str = num_channels
snake_case__ : Any = min_size
snake_case__ : Union[str, Any] = max_size
snake_case__ : Any = num_labels
snake_case__ : List[Any] = mask_feature_size
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase )
snake_case__ : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase )
snake_case__ : Optional[int] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase ) > 0.5
).float()
snake_case__ : str = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase ) > 0.5).long()
snake_case__ : List[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[int] = self.prepare_config_and_inputs()
snake_case__ : int = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = output.encoder_hidden_states
snake_case__ : Any = output.pixel_decoder_hidden_states
snake_case__ : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase ) , config.decoder_config.decoder_layers )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
snake_case__ : Optional[Any] = MaskFormerModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Dict = model(pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase )
snake_case__ : Optional[int] = model(lowerCamelCase , output_hidden_states=lowerCamelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Dict = MaskFormerForInstanceSegmentation(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
def comm_check_on_output(lowerCamelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case__ : int = model(pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase )
snake_case__ : List[Any] = model(lowerCamelCase )
comm_check_on_output(lowerCamelCase )
snake_case__ : int = model(
pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase )
comm_check_on_output(lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_lowerCAmelCase = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Union[str, Any] = MaskFormerModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase , **lowerCamelCase , output_hidden_states=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowercase__ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowercase__ ( self ) -> str:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(lowerCamelCase )
snake_case__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : int = [*signature.parameters.keys()]
snake_case__ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase )
@slow
def lowercase__ ( self ) -> str:
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
snake_case__ : Dict = MaskFormerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = (self.model_tester.min_size,) * 2
snake_case__ : int = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowerCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowerCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=lowerCamelCase ).long(),
}
snake_case__ : Union[str, Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase )
snake_case__ : Dict = model(**lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase , **lowerCamelCase , output_hidden_states=lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(lowerCamelCase ).to(lowerCamelCase )
snake_case__ : Optional[int] = model(**lowerCamelCase , output_attentions=lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
snake_case__ : List[Any] = self.all_model_classes[1]
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Union[str, Any] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
snake_case__ : Optional[int] = model(lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase ).loss
loss.backward()
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : List[str] = self.all_model_classes[1]
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
snake_case__ : Optional[int] = True
snake_case__ : Optional[int] = True
snake_case__ : Optional[int] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
snake_case__ : Optional[int] = model(lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase )
snake_case__ : Any = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case__ : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
snake_case__ : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case__ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCAmelCase : List[Any] = 1E-4
def _A ( ):
snake_case__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(lowerCamelCase )
snake_case__ : Optional[Any] = self.default_image_processor
snake_case__ : Optional[int] = prepare_img()
snake_case__ : List[Any] = image_processor(lowerCamelCase , return_tensors='''pt''' ).to(lowerCamelCase )
snake_case__ : Dict = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case__ : Optional[Any] = model(**lowerCamelCase )
snake_case__ : Optional[Any] = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
snake_case__ : Union[str, Any] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
snake_case__ : Optional[int] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : int = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCamelCase )
.eval()
)
snake_case__ : Any = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : Union[str, Any] = image_processor(lowerCamelCase , return_tensors='''pt''' ).to(lowerCamelCase )
snake_case__ : List[str] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case__ : List[Any] = model(**lowerCamelCase )
# masks_queries_logits
snake_case__ : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case__ : Union[str, Any] = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
snake_case__ : int = torch.tensor(lowerCamelCase ).to(lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
# class_queries_logits
snake_case__ : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case__ : Tuple = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(lowerCamelCase )
.eval()
)
snake_case__ : List[str] = self.default_image_processor
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : str = image_processor(lowerCamelCase , return_tensors='''pt''' ).to(lowerCamelCase )
snake_case__ : Dict = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case__ : int = model(**lowerCamelCase )
# masks_queries_logits
snake_case__ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case__ : Optional[int] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
snake_case__ : Union[str, Any] = torch.tensor(lowerCamelCase ).to(lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
# class_queries_logits
snake_case__ : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case__ : List[Any] = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCamelCase )
.eval()
)
snake_case__ : Dict = self.default_image_processor
snake_case__ : Any = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
snake_case__ : Union[str, Any] = inputs['''pixel_values'''].to(lowerCamelCase )
snake_case__ : Union[str, Any] = [el.to(lowerCamelCase ) for el in inputs['''mask_labels''']]
snake_case__ : Optional[int] = [el.to(lowerCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
snake_case__ : List[str] = model(**lowerCamelCase )
self.assertTrue(outputs.loss is not None )
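# Usage sketch mirroring the slow tests above: a single inference pass with the
# same checkpoint, followed by semantic-segmentation post-processing. The image
# path reuses the fixture above; treat this as a sketch, not part of the suite.
from PIL import Image
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
seg_model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    seg_outputs = seg_model(**inputs)
semantic_map = processor.post_process_semantic_segmentation(seg_outputs, target_sizes=[image.size[::-1]])[0]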
| 705 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (EulerDiscreteScheduler,)
_lowerCAmelCase = 1_0
def lowercase__ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase )
return config
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Any = self.get_scheduler_config()
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Any = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : int = model(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : Optional[int] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Union[str, Any] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Tuple = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : int = output.prev_sample
snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Dict = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Optional[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
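# Sketch: the bare denoising loop the tests above exercise, with a zero
# predictor standing in for a real UNet, just to show the
# scale_model_input -> step contract of EulerDiscreteScheduler. Shapes and
# config values are illustrative assumptions.
import torch
from diffusers import EulerDiscreteScheduler

sched = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
sched.set_timesteps(10)
latents = torch.randn(1, 3, 8, 8) * sched.init_noise_sigma
rng = torch.manual_seed(0)
for t in sched.timesteps:
    model_input = sched.scale_model_input(latents, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real model
    latents = sched.step(noise_pred, t, latents, generator=rng).prev_sample
print(latents.abs().sum().item())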
| 694 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def _A ( ):
snake_case__ : str = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
snake_case__ : Dict = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(snake_case__ )
DownloadCommand.register_subcommand(snake_case__ )
EnvironmentCommand.register_subcommand(snake_case__ )
RunCommand.register_subcommand(snake_case__ )
ServeCommand.register_subcommand(snake_case__ )
UserCommands.register_subcommand(snake_case__ )
AddNewModelCommand.register_subcommand(snake_case__ )
AddNewModelLikeCommand.register_subcommand(snake_case__ )
LfsCommands.register_subcommand(snake_case__ )
PTtoTFCommand.register_subcommand(snake_case__ )
# Let's go
snake_case__ : str = parser.parse_args()
if not hasattr(snake_case__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
snake_case__ : Any = args.func(snake_case__ )
service.run()
if __name__ == "__main__":
main()
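# Sketch: the subcommand-registry pattern used above, reduced to its essentials.
# Each command registers its own subparser and stores a factory in `func`;
# main() then dispatches on `args.func`. Names below are illustrative.
from argparse import ArgumentParser


class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("hello", help="print a greeting")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello {self.name}")


def demo_main():
    parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo-cli command helpers")
    HelloCommand.register_subcommand(subparsers)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    args.func(args).run()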
| 706 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = ['pixel_values']
def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : int = do_resize
snake_case__ : Dict = do_rescale
snake_case__ : Any = size_divisor
snake_case__ : str = resample
super().__init__(**lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
snake_case__ : Any = height // size_divisor * size_divisor
snake_case__ : Union[str, Any] = width // size_divisor * size_divisor
snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
return image
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature:
"""simple docstring"""
snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor
snake_case__ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images]
if do_resize:
snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images]
snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
snake_case__ : str = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
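# Sketch: the size_divisor rounding applied in resize() above. Heights and
# widths are snapped *down* to the nearest multiple of size_divisor, so with
# the default of 32 an 800x1066 image becomes 800x1056. Example values are
# assumptions.
def snap_down(value: int, divisor: int) -> int:
    return value // divisor * divisor


assert snap_down(800, 32) == 800
assert snap_down(1066, 32) == 1056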
| 694 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = "▁"
_lowerCAmelCase = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
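# Sketch: the sequence layouts produced by build_inputs_with_special_tokens
# above, which follows the RoBERTa/BART convention:
#   single sequence:  <s> A </s>
#   pair:             <s> A </s></s> B </s>
# A tiny stand-in with assumed integer ids, just to make the layout concrete:
CLS, SEP = 0, 2  # assumed ids for <s> and </s>
a, b = [10, 11], [20]
assert [CLS] + a + [SEP] == [0, 10, 11, 2]
assert [CLS] + a + [SEP, SEP] + b + [SEP] == [0, 10, 11, 2, 2, 20, 2]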
| 707 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
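# Sketch: why the test parametrizes on "filename with blanks.csv" — quote()
# percent-encodes spaces while leaving plain filenames untouched, so both
# variants resolve to valid URLs.
from urllib.parse import quote

assert quote("filename.csv") == "filename.csv"
assert quote("filename with blanks.csv") == "filename%20with%20blanks.csv"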
| 694 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : List[str] = dataset
snake_case__ : Dict = process
snake_case__ : str = params
def __len__( self ) -> Dict:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Dict = self.dataset[i]
snake_case__ : str = self.process(lowerCamelCase , **self.params )
return processed
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = loader
snake_case__ : Dict = infer
snake_case__ : List[str] = params
if loader_batch_size == 1:
            # loader_batch_size == 1: deactivate batch unrolling altogether to spare some time
snake_case__ : Tuple = None
snake_case__ : Optional[Any] = loader_batch_size
# Internal bookkeeping
snake_case__ : Tuple = None
snake_case__ : int = None
def __len__( self ) -> List[str]:
"""simple docstring"""
return len(self.loader )
def __iter__( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : int = iter(self.loader )
return self
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
snake_case__ : Dict = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
snake_case__ : List[Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
# Convert ModelOutput to tuple first
snake_case__ : Optional[int] = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
snake_case__ : Any = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case__ : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase , lowerCamelCase ):
                    # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
snake_case__ : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case__ : Any = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
snake_case__ : List[str] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
snake_case__ : int = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
snake_case__ : List[Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
snake_case__ : List[Any] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
snake_case__ : Dict = self._loader_batch_data.__class__(lowerCamelCase )
self._loader_batch_index += 1
return result
def lowercase__ ( self ) -> Any:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
snake_case__ : List[str] = next(self.iterator )
snake_case__ : Union[str, Any] = self.infer(lowerCamelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCamelCase , torch.Tensor ):
snake_case__ : Any = processed
else:
snake_case__ : Optional[Any] = list(processed.keys() )[0]
snake_case__ : Optional[int] = processed[key]
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : Optional[int] = len(lowerCamelCase )
else:
snake_case__ : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
                # could be the last batch, so we can't unroll as many
                # elements.
snake_case__ : Union[str, Any] = observed_batch_size
# Setting internal index to unwrap the batch
snake_case__ : List[Any] = processed
snake_case__ : Any = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
super().__init__(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __iter__( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[Any] = iter(self.loader )
snake_case__ : Union[str, Any] = None
return self
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
if self.subiterator is None:
snake_case__ : str = self.infer(next(self.iterator ) , **self.params )
try:
            # Try to return the next item
snake_case__ : Union[str, Any] = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
            #
            # Another way to look at it: we're basically flattening lists of lists
            # into a single list, but with generators.
snake_case__ : Dict = self.infer(next(self.iterator ) , **self.params )
snake_case__ : Dict = next(self.subiterator )
return processed
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __iter__( self ) -> Any:
"""simple docstring"""
snake_case__ : int = iter(self.loader )
return self
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = False
snake_case__ : List[str] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
snake_case__ : Optional[Any] = self.loader_batch_item()
snake_case__ : Optional[int] = item.pop('''is_last''' )
accumulator.append(lowerCamelCase )
if is_last:
return accumulator
while not is_last:
snake_case__ : str = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(lowerCamelCase , torch.Tensor ):
snake_case__ : Tuple = processed
else:
snake_case__ : Union[str, Any] = list(processed.keys() )[0]
snake_case__ : str = processed[key]
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : str = len(lowerCamelCase )
else:
snake_case__ : Tuple = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
                    # could be the last batch, so we can't unroll as many
                    # elements.
snake_case__ : Union[str, Any] = observed_batch_size
snake_case__ : List[Any] = processed
snake_case__ : Dict = 0
while self._loader_batch_index < self.loader_batch_size:
snake_case__ : Optional[Any] = self.loader_batch_item()
snake_case__ : List[Any] = item.pop('''is_last''' )
accumulator.append(lowerCamelCase )
if is_last:
return accumulator
else:
snake_case__ : Any = processed
snake_case__ : Union[str, Any] = item.pop('''is_last''' )
accumulator.append(lowerCamelCase )
return accumulator
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = dataset
snake_case__ : Optional[Any] = key
def __len__( self ) -> Tuple:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.dataset[i][self.key]
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
"""simple docstring"""
snake_case__ : Tuple = dataset
snake_case__ : Union[str, Any] = keya
snake_case__ : Any = keya
def __len__( self ) -> Tuple:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
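# Worked example: with voltage=0, current=2 and power=4 the function solves
# V = P / I and returns result(name='voltage', value=2.0).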
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
snake_case__ : int = '''The dog is cute and lives in the garden house'''
snake_case__ : Any = jnp.array([tokenizer.encode(lowerCamelCase )] )
snake_case__ : Union[str, Any] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
snake_case__ : int = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
snake_case__ : str = model(lowerCamelCase )['''last_hidden_state''']
self.assertEqual(output.shape , lowerCamelCase )
        # compare the actual values for a slice of the last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowerCamelCase , atol=1E-3 ) )
| 709 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , snake_case__ )
def _A ( snake_case__ : List[str] , snake_case__ : Dict ):
snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' )
with open(snake_case__ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case__ )
snake_case__ : int = get_imports(snake_case__ )
assert parsed_imports == ["os"]
| 694 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Any = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowerCAmelCase : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
_lowerCAmelCase : Optional[int] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = CHRF.CHAR_ORDER , lowerCamelCase = CHRF.WORD_ORDER , lowerCamelCase = CHRF.BETA , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = len(references[0] )
if any(len(lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
snake_case__ : List[Any] = [[refs[i] for refs in references] for i in range(lowerCamelCase )]
snake_case__ : Tuple = CHRF(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = sb_chrf.corpus_score(lowerCamelCase , lowerCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 710 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'markuplm'
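    # The defaults below should mirror the configuration of the
    # microsoft/markuplm-base checkpoint.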
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Optional[int] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : List[str] = initializer_range
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Optional[Any] = position_embedding_type
snake_case__ : Dict = use_cache
snake_case__ : int = classifier_dropout
# additional properties
snake_case__ : Union[str, Any] = max_depth
snake_case__ : Dict = max_xpath_tag_unit_embeddings
snake_case__ : Any = max_xpath_subs_unit_embeddings
snake_case__ : int = tag_pad_id
snake_case__ : Tuple = subs_pad_id
snake_case__ : Dict = xpath_unit_hidden_size
| 694 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _A ( snake_case__ : NDArray[floataa] , snake_case__ : NDArray[floataa] , snake_case__ : list[int] , snake_case__ : int , ):
snake_case__ : Dict = coefficient_matrix.shape
snake_case__ : Optional[Any] = constant_matrix.shape
if rowsa != colsa:
snake_case__ : List[str] = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(snake_case__ )
if colsa != 1:
snake_case__ : Any = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(snake_case__ )
if rowsa != rowsa:
snake_case__ : Dict = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(snake_case__ )
if len(snake_case__ ) != rowsa:
snake_case__ : str = (
'''Number of initial values must be equal to number of rows in coefficient '''
f'''matrix but received {len(snake_case__ )} and {rowsa}'''
)
raise ValueError(snake_case__ )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
snake_case__ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
snake_case__ : List[str] = table.shape
strictly_diagonally_dominant(snake_case__ )
# Iterates the whole matrix for given number of times
for _ in range(snake_case__ ):
snake_case__ : str = []
for row in range(snake_case__ ):
snake_case__ : Any = 0
for col in range(snake_case__ ):
if col == row:
snake_case__ : Union[str, Any] = table[row][col]
elif col == cols - 1:
snake_case__ : Dict = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
snake_case__ : Dict = (temp + val) / denom
new_val.append(snake_case__ )
snake_case__ : Dict = new_val
return [float(snake_case__ ) for i in new_val]
def _A ( snake_case__ : NDArray[floataa] ):
snake_case__ : Union[str, Any] = table.shape
snake_case__ : Dict = True
for i in range(0 , snake_case__ ):
snake_case__ : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
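    # Illustrative system (hypothetical values) on which the method converges,
    # since the coefficient matrix is strictly diagonally dominant:
    #   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    #   constant = np.array([[2.0], [-6.0], [-4.0]])
    #   a few Jacobi iterations from init_val [0.5, -0.5, -0.5] approach the
    #   exact solution of the system.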
| 711 |
'''simple docstring'''
def _A ( snake_case__ : float ):
return 10 - x * x
def _A ( snake_case__ : float , snake_case__ : float ):
    # Bolzano's theorem: a sign change between a and b guarantees a root in between
if equation(snake_case__ ) * equation(snake_case__ ) >= 0:
raise ValueError('''Wrong space!''' )
snake_case__ : List[str] = a
while (b - a) >= 0.01:
# Find middle point
snake_case__ : Optional[int] = (a + b) / 2
# Check if middle point is root
if equation(snake_case__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case__ ) * equation(snake_case__ ) < 0:
snake_case__ : Dict = c
else:
snake_case__ : List[str] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
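    # Both intervals bracket a sign change of 10 - x*x, so each call should
    # print a value near sqrt(10) ~= 3.162 (within the 0.01 stopping width).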
print(bisection(-2, 5))
print(bisection(0, 6))
| 694 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def _A ( snake_case__ : List[Any] ):
# initialize config
if "resnet-50" in model_name:
snake_case__ : Optional[int] = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
snake_case__ : Tuple = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
raise ValueError('''Model name should include either resnet50 or resnet101''' )
snake_case__ : int = DetrConfig(use_timm_backbone=snake_case__ , backbone_config=snake_case__ )
# set label attributes
snake_case__ : List[Any] = '''panoptic''' in model_name
if is_panoptic:
snake_case__ : Tuple = 2_50
else:
snake_case__ : Tuple = 91
snake_case__ : List[str] = '''huggingface/label-files'''
snake_case__ : List[Any] = '''coco-detection-id2label.json'''
snake_case__ : Union[str, Any] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ : Tuple = {int(snake_case__ ): v for k, v in idalabel.items()}
snake_case__ : Dict = idalabel
snake_case__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def _A ( snake_case__ : int ):
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case__ : Any = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def _A ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] ):
snake_case__ : int = state_dict.pop(snake_case__ )
snake_case__ : Optional[int] = val
def _A ( snake_case__ : Dict , snake_case__ : str=False ):
snake_case__ : str = ''''''
if is_panoptic:
snake_case__ : int = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case__ : str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case__ : List[str] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Any = in_proj_weight[:2_56, :]
snake_case__ : List[str] = in_proj_bias[:2_56]
snake_case__ : Union[str, Any] = in_proj_weight[2_56:5_12, :]
snake_case__ : Union[str, Any] = in_proj_bias[2_56:5_12]
snake_case__ : Optional[int] = in_proj_weight[-2_56:, :]
snake_case__ : Optional[Any] = in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
snake_case__ : Dict = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case__ : str = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[:2_56, :]
snake_case__ : str = in_proj_bias[:2_56]
snake_case__ : Union[str, Any] = in_proj_weight[2_56:5_12, :]
snake_case__ : Union[str, Any] = in_proj_bias[2_56:5_12]
snake_case__ : List[str] = in_proj_weight[-2_56:, :]
snake_case__ : int = in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
snake_case__ : Dict = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
snake_case__ : Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
snake_case__ : Tuple = in_proj_weight_cross_attn[:2_56, :]
snake_case__ : Any = in_proj_bias_cross_attn[:2_56]
snake_case__ : List[str] = in_proj_weight_cross_attn[2_56:5_12, :]
snake_case__ : List[str] = in_proj_bias_cross_attn[2_56:5_12]
snake_case__ : Optional[Any] = in_proj_weight_cross_attn[-2_56:, :]
snake_case__ : Tuple = in_proj_bias_cross_attn[-2_56:]
def _A ( ):
snake_case__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ : Optional[int] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _A ( snake_case__ : str , snake_case__ : str=None , snake_case__ : int=False ):
snake_case__ : List[Any] = get_detr_config(snake_case__ )
# load original model from torch hub
snake_case__ : List[Any] = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(f'''Converting model {model_name}...''' )
snake_case__ : Tuple = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=snake_case__ ).eval()
snake_case__ : Optional[int] = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(snake_case__ ):
if is_panoptic:
snake_case__ : Dict = '''detr.''' + src
rename_key(snake_case__ , snake_case__ , snake_case__ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case__ , is_panoptic=snake_case__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : str = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
snake_case__ : str = state_dict.pop(snake_case__ )
snake_case__ : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : List[Any] = state_dict.pop(snake_case__ )
snake_case__ : str = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
snake_case__ : Any = state_dict.pop(snake_case__ )
snake_case__ : List[Any] = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
snake_case__ : Union[str, Any] = state_dict.pop(snake_case__ )
snake_case__ : Dict = val
# finally, create HuggingFace model and load state dict
snake_case__ : Optional[Any] = DetrForSegmentation(snake_case__ ) if is_panoptic else DetrForObjectDetection(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# verify our conversion on an image
snake_case__ : str = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
snake_case__ : str = DetrImageProcessor(format=snake_case__ )
snake_case__ : Union[str, Any] = processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ : Optional[int] = encoding['''pixel_values''']
snake_case__ : Tuple = detr(snake_case__ )
snake_case__ : List[str] = model(snake_case__ )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(f'''nielsr/{model_name}''' )
processor.push_to_hub(f'''nielsr/{model_name}''' )
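# Example invocation (the script filename here is hypothetical):
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50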
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
_lowerCAmelCase : Any = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 712 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : list[float] , snake_case__ : list[float] ):
snake_case__ : Dict = sorted(numsa + numsa )
snake_case__ ,snake_case__ : Tuple = divmod(len(snake_case__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
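    # Example: arrays [1, 3] and [2] merge to [1, 2, 3], whose median is 2.0.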
_lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 694 | 0 |
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 6_5_5_2_1
def _A ( snake_case__ : str ):
snake_case__ : Dict = 1
snake_case__ : List[str] = 0
for plain_chr in plain_text:
snake_case__ : Tuple = (a + ord(snake_case__ )) % MOD_ADLER
snake_case__ : Union[str, Any] = (b + a) % MOD_ADLER
return (b << 16) | a
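# Reference value: the ASCII string "Wikipedia" yields checksum
# 300286872 (0x11E60398) under this definition.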
| 713 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Any ):
if isinstance(snake_case__ , torch.Tensor ):
return image
elif isinstance(snake_case__ , PIL.Image.Image ):
snake_case__ : Optional[int] = [image]
if isinstance(image[0] , PIL.Image.Image ):
snake_case__ : Dict = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
snake_case__ : List[Any] = np.concatenate(snake_case__ , axis=0 )
snake_case__ : int = np.array(snake_case__ ).astype(np.floataa ) / 2_55.0
snake_case__ : Tuple = image.transpose(0 , 3 , 1 , 2 )
snake_case__ : List[str] = 2.0 * image - 1.0
snake_case__ : str = torch.from_numpy(snake_case__ )
elif isinstance(image[0] , torch.Tensor ):
snake_case__ : Dict = torch.cat(snake_case__ , dim=0 )
return image
def _A ( snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int=0.99_95 ):
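    # Spherical linear interpolation (slerp) between two latent tensors; when
    # the inputs are nearly parallel (|dot| above the threshold) it falls back
    # to plain linear interpolation to avoid numerical instability.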
if not isinstance(snake_case__ , np.ndarray ):
snake_case__ : List[str] = True
snake_case__ : Dict = va.device
snake_case__ : int = va.cpu().numpy()
snake_case__ : List[str] = va.cpu().numpy()
snake_case__ : int = np.sum(va * va / (np.linalg.norm(snake_case__ ) * np.linalg.norm(snake_case__ )) )
if np.abs(snake_case__ ) > DOT_THRESHOLD:
snake_case__ : int = (1 - t) * va + t * va
else:
snake_case__ : Union[str, Any] = np.arccos(snake_case__ )
snake_case__ : int = np.sin(snake_case__ )
snake_case__ : Optional[Any] = theta_a * t
snake_case__ : List[Any] = np.sin(snake_case__ )
snake_case__ : Dict = np.sin(theta_a - theta_t ) / sin_theta_a
snake_case__ : Any = sin_theta_t / sin_theta_a
snake_case__ : Any = sa * va + sa * va
if inputs_are_torch:
snake_case__ : Union[str, Any] = torch.from_numpy(snake_case__ ).to(snake_case__ )
return va
def _A ( snake_case__ : str , snake_case__ : Dict ):
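    # Squared great-circle distance between L2-normalized embeddings,
    # the loss used for CLIP guidance.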
snake_case__ : Union[str, Any] = F.normalize(snake_case__ , dim=-1 )
snake_case__ : Any = F.normalize(snake_case__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def _A ( snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
for param in model.parameters():
snake_case__ : Union[str, Any] = value
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCamelCase , text_encoder=lowerCamelCase , clip_model=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , feature_extractor=lowerCamelCase , coca_model=lowerCamelCase , coca_tokenizer=lowerCamelCase , coca_transform=lowerCamelCase , )
snake_case__ : List[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , lowerCamelCase )
else feature_extractor.size['''shortest_edge''']
)
snake_case__ : Optional[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowerCamelCase )
set_requires_grad(self.clip_model , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase = "auto" ) -> Dict:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case__ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
set_requires_grad(self.vae , lowerCamelCase )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.vae , lowerCamelCase )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
set_requires_grad(self.unet , lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
set_requires_grad(self.unet , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Dict = min(int(num_inference_steps * strength ) , lowerCamelCase )
snake_case__ : List[Any] = max(num_inference_steps - init_timestep , 0 )
snake_case__ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) -> List[str]:
"""simple docstring"""
if not isinstance(lowerCamelCase , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(lowerCamelCase )}''' )
snake_case__ : str = image.to(device=lowerCamelCase , dtype=lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : Any = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase )
]
snake_case__ : int = torch.cat(lowerCamelCase , dim=0 )
else:
snake_case__ : List[str] = self.vae.encode(lowerCamelCase ).latent_dist.sample(lowerCamelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case__ : Optional[int] = 0.18_215 * init_latents
snake_case__ : str = init_latents.repeat_interleave(lowerCamelCase , dim=0 )
snake_case__ : List[str] = randn_tensor(init_latents.shape , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase )
# get latents
snake_case__ : Optional[int] = self.scheduler.add_noise(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : List[Any] = init_latents
return latents
def lowercase__ ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.coca_transform(lowerCamelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case__ : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
snake_case__ : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = self.feature_extractor.preprocess(lowerCamelCase )
snake_case__ : int = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case__ : Optional[Any] = self.clip_model.get_image_features(lowerCamelCase )
snake_case__ : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCamelCase )
snake_case__ : Optional[int] = image_embeddings_clip.repeat_interleave(lowerCamelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = latents.detach().requires_grad_()
snake_case__ : List[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
snake_case__ : int = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case__ : str = self.scheduler.alphas_cumprod[timestep]
snake_case__ : str = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case__ : Any = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case__ : Optional[Any] = torch.sqrt(lowerCamelCase )
snake_case__ : List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowerCamelCase ):
snake_case__ : str = self.scheduler.sigmas[index]
snake_case__ : Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case__ : Union[str, Any] = 1 / 0.18_215 * sample
snake_case__ : str = self.vae.decode(lowerCamelCase ).sample
snake_case__ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case__ : Tuple = transforms.Resize(self.feature_extractor_size )(lowerCamelCase )
snake_case__ : Union[str, Any] = self.normalize(lowerCamelCase ).to(latents.dtype )
snake_case__ : Optional[Any] = self.clip_model.get_image_features(lowerCamelCase )
snake_case__ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCamelCase )
snake_case__ : Dict = spherical_dist_loss(lowerCamelCase , lowerCamelCase ).mean() * clip_guidance_scale
snake_case__ : Dict = -torch.autograd.grad(lowerCamelCase , lowerCamelCase )[0]
if isinstance(self.scheduler , lowerCamelCase ):
snake_case__ : int = latents.detach() + grads * (sigma**2)
snake_case__ : Union[str, Any] = noise_pred_original
else:
snake_case__ : Union[str, Any] = noise_pred_original - torch.sqrt(lowerCamelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 0.6 , lowerCamelCase = 50 , lowerCamelCase = 7.5 , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = 100 , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , lowerCamelCase = 0.8 , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , ) -> List[str]:
"""simple docstring"""
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(lowerCamelCase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(lowerCamelCase , torch.Generator ) and batch_size > 1:
snake_case__ : str = [generator] + [None] * (batch_size - 1)
snake_case__ : Any = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
snake_case__ : List[str] = [x[0] for x in coca_is_none if x[1]]
snake_case__ : Union[str, Any] = ''', '''.join(lowerCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCamelCase ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
snake_case__ : Dict = self.get_image_description(lowerCamelCase )
if style_prompt is None:
if len(lowerCamelCase ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
snake_case__ : Optional[int] = self.get_image_description(lowerCamelCase )
# get prompt text embeddings for content and style
snake_case__ : List[str] = self.tokenizer(
lowerCamelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCamelCase , return_tensors='''pt''' , )
snake_case__ : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
snake_case__ : Any = self.tokenizer(
lowerCamelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCamelCase , return_tensors='''pt''' , )
snake_case__ : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
snake_case__ : List[Any] = slerp(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# duplicate text embeddings for each generation per prompt
snake_case__ : Tuple = text_embeddings.repeat_interleave(lowerCamelCase , dim=0 )
# set timesteps
snake_case__ : Optional[Any] = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
snake_case__ : Any = {}
if accepts_offset:
snake_case__ : Dict = 1
self.scheduler.set_timesteps(lowerCamelCase , **lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
snake_case__ : Optional[int] = self.get_timesteps(lowerCamelCase , lowerCamelCase , self.device )
snake_case__ : Any = timesteps[:1].repeat(lowerCamelCase )
# Preprocess image
snake_case__ : Union[str, Any] = preprocess(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : Dict = self.prepare_latents(
lowerCamelCase , lowerCamelCase , lowerCamelCase , text_embeddings.dtype , self.device , lowerCamelCase )
snake_case__ : str = preprocess(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ : List[Any] = self.prepare_latents(
lowerCamelCase , lowerCamelCase , lowerCamelCase , text_embeddings.dtype , self.device , lowerCamelCase )
snake_case__ : List[Any] = slerp(lowerCamelCase , lowerCamelCase , lowerCamelCase )
if clip_guidance_scale > 0:
snake_case__ : Union[str, Any] = self.get_clip_image_embeddings(lowerCamelCase , lowerCamelCase )
snake_case__ : List[Any] = self.get_clip_image_embeddings(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = slerp(
lowerCamelCase , lowerCamelCase , lowerCamelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case__ : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case__ : List[Any] = content_text_input.input_ids.shape[-1]
snake_case__ : Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=lowerCamelCase , return_tensors='''pt''' )
snake_case__ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
snake_case__ : List[Any] = uncond_embeddings.repeat_interleave(lowerCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case__ : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case__ : int = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
snake_case__ : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
snake_case__ : List[Any] = torch.randn(lowerCamelCase , generator=lowerCamelCase , device='''cpu''' , dtype=lowerCamelCase ).to(
self.device )
else:
snake_case__ : Union[str, Any] = torch.randn(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
snake_case__ : List[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case__ : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
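 # eta = 0 makes DDIM sampling deterministic; eta = 1 adds DDPM-like noise at each step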
snake_case__ : Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case__ : Dict = {}
if accepts_eta:
snake_case__ : Any = eta
# check if the scheduler accepts generator
snake_case__ : Union[str, Any] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
snake_case__ : Dict = generator
with self.progress_bar(total=lowerCamelCase ):
for i, t in enumerate(lowerCamelCase ):
# expand the latents if we are doing classifier free guidance
snake_case__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case__ : List[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
snake_case__ : List[Any] = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
snake_case__ : List[str] = noise_pred.chunk(2 )
snake_case__ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
snake_case__ : str = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
snake_case__ : Optional[Any] = self.cond_fn(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
snake_case__ : Tuple = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
 # Hardcode 0.18215 because stable-diffusion-2-base does not have `self.vae.config.scaling_factor`
snake_case__ : int = 1 / 0.18_215 * latents
snake_case__ : List[Any] = self.vae.decode(lowerCamelCase ).sample
snake_case__ : Any = (image / 2 + 0.5).clamp(0 , 1 )
snake_case__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case__ : Union[str, Any] = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCamelCase , nsfw_content_detected=lowerCamelCase )
| 714 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'encoder-decoder'
_lowerCAmelCase = True
def __init__( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case__ : List[str] = kwargs.pop('''encoder''' )
snake_case__ : Any = encoder_config.pop('''model_type''' )
snake_case__ : List[str] = kwargs.pop('''decoder''' )
snake_case__ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : str = True
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case__ : Optional[int] = True
snake_case__ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case__ : List[Any] = self.encoder.to_dict()
snake_case__ : str = self.decoder.to_dict()
snake_case__ : Any = self.__class__.model_type
return output
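# Minimal usage sketch (illustrative; assumes the de-obfuscated upstream names
# `EncoderDecoderConfig` / `from_encoder_decoder_configs` from transformers --
# the dump above masks them as `snake_case` / `lowercase__`):
#
#   from transformers import BertConfig, EncoderDecoderConfig
#   enc, dec = BertConfig(), BertConfig()
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention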
| 694 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : Optional[Any] = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
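        # i.e. single sequence: <s> A </s> ; pair: <s> A </s></s> B </s> (RoBERTa-style)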
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 694 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : int = [8, 5, 9, 7]
_lowerCAmelCase : Optional[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : Optional[int] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = claim_vector
snake_case__ : Dict = allocated_resources_table
snake_case__ : Optional[Any] = maximum_claim_table
def lowercase__ ( self ) -> list[int]:
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def lowercase__ ( self ) -> list[int]:
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def lowercase__ ( self ) -> list[list[int]]:
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def lowercase__ ( self ) -> dict[int, list[int]]:
"""simple docstring"""
return {self.__need().index(lowerCamelCase ): i for i in self.__need()}
def lowercase__ ( self , **lowerCamelCase ) -> None:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.__need()
snake_case__ : int = self.__allocated_resources_table
snake_case__ : str = self.__available_resources()
snake_case__ : int = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
snake_case__ : Any = False
for each_need in need_list:
snake_case__ : Optional[int] = True
for index, need in enumerate(lowerCamelCase ):
if need > available_resources[index]:
snake_case__ : Optional[Any] = False
break
if execution:
snake_case__ : str = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
snake_case__ : List[Any] = original_need_index
print(f'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(lowerCamelCase )
# update available/freed resources stack
snake_case__ : Optional[int] = np.array(lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def lowercase__ ( self ) -> int:
"""simple docstring"""
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
f'''P{self.__allocated_resources_table.index(lowerCamelCase ) + 1}'''
+ ''' '''.join(f'''{it:>8}''' for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
f'''P{self.__maximum_claim_table.index(lowerCamelCase ) + 1}'''
+ ''' '''.join(f'''{it:>8}''' for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(lowerCamelCase ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
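# Illustrative run (an assumption, not part of the original file): upstream,
# de-obfuscated versions of this class are typically constructed with the three
# module-level tables above and invoked with a describe flag, e.g.:
#
#   BankersAlgorithm(claim_vector, allocated_resources_table,
#                    maximum_claim_table).main(describe=True)
#
# The dump masks those names as `snake_case`, `_lowerCAmelCase` and `lowercase__`,
# so the call is shown for reference only.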
| 716 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowerCAmelCase : Optional[int] = parser.parse_args()
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings"
_lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model"
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase : Optional[Any] = pipe.to(device)
# to channels last
_lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
_lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last)
_lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4)
_lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9
_lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8)
_lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
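# Note: passing `sample_input` lets ipex.optimize use example shapes for weight
# prepacking and operator fusion; the fallback without it still applies the
# bfloat16 cast and weight-layout optimizations.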
# compute
_lowerCAmelCase : Tuple = 6_6_6
_lowerCAmelCase : str = torch.Generator(device).manual_seed(seed)
_lowerCAmelCase : Dict = {"generator": generator}
if args.steps is not None:
_lowerCAmelCase : Tuple = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 0 |
'''simple docstring'''
def _A ( snake_case__ : str ):
return " ".join(
''''''.join(word[::-1] ) if len(snake_case__ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 717 |
'''simple docstring'''
import socket
def _A ( ):
snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case__ : str = socket.gethostname()
snake_case__ : Union[str, Any] = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
snake_case__ : int = sock.recv(10_24 )
if not data:
break
out_file.write(snake_case__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
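# Note: this client assumes a matching server is already listening on
# localhost:12312 and streams a file back after receiving the greeting;
# recv() returning b"" signals that the server closed the connection and the
# transfer is complete.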
| 694 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
snake_case__ : Any = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Any = '''sshleifer/tiny-gpt2'''
snake_case__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : Any = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Any = '''sgugger/tiny-distilbert-classification'''
snake_case__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , only_pretrain_model=lowerCamelCase , )
snake_case__ : Union[str, Any] = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : str = '''sshleifer/tiny-gpt2'''
snake_case__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Optional[int] = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : List[Any] = '''sshleifer/tiny-gpt2'''
snake_case__ : str = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : Any = TensorFlowBenchmark(lowerCamelCase , [config] )
snake_case__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = '''sshleifer/tiny-gpt2'''
snake_case__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Dict = TensorFlowBenchmark(lowerCamelCase , [config] )
snake_case__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Dict = '''sshleifer/tiny-gpt2'''
snake_case__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Optional[int] = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : List[Any] = '''sshleifer/tiny-gpt2'''
snake_case__ : Dict = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : List[Any] = TensorFlowBenchmark(lowerCamelCase , [config] )
snake_case__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[Any] = '''patrickvonplaten/t5-tiny-random'''
snake_case__ : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : int = TensorFlowBenchmark(lowerCamelCase , configs=[config] )
snake_case__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = '''sshleifer/tiny-gpt2'''
snake_case__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : Optional[int] = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowerCamelCase , save_to_csv=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(lowerCamelCase , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(lowerCamelCase , '''env.csv''' ) , multi_process=lowerCamelCase , )
snake_case__ : List[str] = TensorFlowBenchmark(lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , '''env.csv''' ) ).exists() )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : int = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(lowerCamelCase ):
self.assertTrue(hasattr(lowerCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''current''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase , '''log.txt''' ) , log_print=lowerCamelCase , trace_memory_line_by_line=lowerCamelCase , eager_mode=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : int = TensorFlowBenchmark(lowerCamelCase )
snake_case__ : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(lowerCamelCase , '''log.txt''' ) ).exists() )
| 718 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
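# Worked examples (illustrative, using the formulas above):
#   simple interest:   10_000 * 0.0005 * 30            -> 150.0
#   compound interest: 10_000 * ((1 + 0.01) ** 12 - 1) -> ~1_268.25
#   APR interest:      compound interest at a daily rate of apr / 365
#                      over number_of_years * 365 periods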
| 694 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : List[str] = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = ["MobileNetV2FeatureExtractor"]
_lowerCAmelCase : Optional[int] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
'''simple docstring'''
from math import isqrt
def _A ( snake_case__ : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def _A ( snake_case__ : int = 10**6 ):
snake_case__ : str = 0
snake_case__ : List[str] = 1
snake_case__ : str = 7
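    # Candidates are differences of consecutive cubes:
    # (k + 1) ** 3 - k ** 3 = 3 * k ** 2 + 3 * k + 1, i.e. 7, 19, 37, 61, 91, ...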
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 0 |
'''simple docstring'''
def _A ( snake_case__ : str , snake_case__ : str ):
assert x is not None
assert y is not None
snake_case__ : Union[str, Any] = len(snake_case__ )
snake_case__ : List[str] = len(snake_case__ )
# declaring the array for storing the dp values
snake_case__ : Union[str, Any] = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
snake_case__ : Any = 1 if x[i - 1] == y[j - 1] else 0
snake_case__ : Optional[int] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
snake_case__ : str = ''''''
snake_case__ : List[Any] = m, n
while i > 0 and j > 0:
snake_case__ : List[Any] = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
snake_case__ : str = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = "AGGTAB"
_lowerCAmelCase : Optional[Any] = "GXTXAYB"
_lowerCAmelCase : Any = 4
_lowerCAmelCase : str = "GTAB"
_lowerCAmelCase : Union[str, Any] = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
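# e.g. for x = "ABCBDAB", y = "BDCAB" the LCS length is 4
# (one maximal common subsequence is "BDAB")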
| 720 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = fa_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase )
return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
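# Usage sketch (illustrative): this metric wraps `sklearn.metrics.f1_score`, so
# the same numbers can be reproduced directly:
#
#   from sklearn.metrics import f1_score
#   f1_score([0, 1, 0, 1, 0], [0, 0, 1, 1, 0])  # 0.5, matching Example 1 above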
| 694 | 0 |
'''simple docstring'''
def _A ( snake_case__ : float , snake_case__ : list[float] ):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
snake_case__ : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(snake_case__ ) )
return round(snake_case__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
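# Worked example (illustrative): discount_rate = 0.1, cash_flows = [-1000, 500, 500, 500]
#   -1000 / 1.1**0 + 500 / 1.1**1 + 500 / 1.1**2 + 500 / 1.1**3  ->  243.43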
| 721 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 42
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
snake_case__ : str = timestep
if not torch.is_tensor(lowerCamelCase ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[Any] = timesteps[None].to(sample.device )
snake_case__ : Any = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
snake_case__ : Tuple = self.time_mlp(lowerCamelCase )
else:
snake_case__ : Union[str, Any] = timestep_embed[..., None]
snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
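        # timestep_embed now matches the sample layout (batch, channels, length)
        # and is handed to the down/mid/up blocks as `temb` below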
# 2. down
snake_case__ : List[Any] = ()
for downsample_block in self.down_blocks:
snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case__ : str = down_block_res_samples[-1:]
snake_case__ : int = down_block_res_samples[:-1]
snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase )
| 694 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def lowercase__ ( *lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : str = ObjectDetectionPipeline(model=lowerCamelCase , image_processor=lowerCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Optional[Any] = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
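        # threshold=0.0 keeps every candidate box, so the output structure is non-empty and checkable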
self.assertGreater(len(lowerCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase , {
'''score''': ANY(lowerCamelCase ),
'''label''': ANY(lowerCamelCase ),
'''box''': {'''xmin''': ANY(lowerCamelCase ), '''ymin''': ANY(lowerCamelCase ), '''xmax''': ANY(lowerCamelCase ), '''ymax''': ANY(lowerCamelCase )},
} , )
import datasets
snake_case__ : Any = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
snake_case__ : Optional[int] = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
snake_case__ : int = object_detector(lowerCamelCase , threshold=0.0 )
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(lowerCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase , {
'''score''': ANY(lowerCamelCase ),
'''label''': ANY(lowerCamelCase ),
'''box''': {'''xmin''': ANY(lowerCamelCase ), '''ymin''': ANY(lowerCamelCase ), '''xmax''': ANY(lowerCamelCase ), '''ymax''': ANY(lowerCamelCase )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
pass
@require_torch
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
snake_case__ : List[Any] = AutoModelForObjectDetection.from_pretrained(lowerCamelCase )
snake_case__ : str = AutoFeatureExtractor.from_pretrained(lowerCamelCase )
snake_case__ : str = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase )
snake_case__ : Optional[int] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
] , )
snake_case__ : str = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
] , )
@require_torch
@slow
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int = '''facebook/detr-resnet-50'''
snake_case__ : int = AutoModelForObjectDetection.from_pretrained(lowerCamelCase )
snake_case__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase )
snake_case__ : List[Any] = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase )
snake_case__ : List[str] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
snake_case__ : List[Any] = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Union[str, Any] = '''facebook/detr-resnet-50'''
snake_case__ : int = pipeline('''object-detection''' , model=lowerCamelCase )
snake_case__ : int = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
snake_case__ : Dict = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : str = 0.9_985
snake_case__ : Optional[Any] = '''facebook/detr-resnet-50'''
snake_case__ : Optional[int] = pipeline('''object-detection''' , model=lowerCamelCase )
snake_case__ : Union[str, Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=lowerCamelCase )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int = '''Narsil/layoutlmv3-finetuned-funsd'''
snake_case__ : Tuple = 0.9_993
snake_case__ : Optional[int] = pipeline('''object-detection''' , model=lowerCamelCase , threshold=lowerCamelCase )
snake_case__ : str = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
] , )
| 700 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , '''models''' )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(bort_vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids_pt = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='''pt''' )
    output_hf = hf_bort_model(**input_ids_pt )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print('''✔️ Both models output the same tensors''' )
    else:
        print('''❌ The models do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , max_absolute_diff )
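# Illustrative helper (ours, not part of the original conversion script): it shows
# how one wildcard row of the Gluon -> Transformers mapping table above expands
# for a concrete layer index. The two template strings are copied from the table;
# the helper name `expand_mapping_row` is hypothetical.
def expand_mapping_row(layer_index: int) -> tuple:
    """
    >>> expand_mapping_row(2)[1]
    'bert.encoder.layer.2.attention.self.key.weight'
    """
    gluon_template = "encoder.transformer_cells.*.attention_cell.proj_key.weight"
    hf_template = "bert.encoder.layer.*.attention.self.key.weight"
    return gluon_template.replace("*", str(layer_index)), hf_template.replace("*", str(layer_index))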
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 694 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Tuple:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> FalconConfig:
"""simple docstring"""
return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
        """simple docstring"""
        model = FalconModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> None:
        """simple docstring"""
        config.add_cross_attention = True
        model = FalconModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> None:
        """simple docstring"""
        model = FalconForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> None:
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['''hidden_states'''][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class FalconModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': FalconModel,
            'text-classification': FalconForSequenceClassification,
            'text-generation': FalconForCausalLM,
            'question-answering': FalconForQuestionAnswering,
            'token-classification': FalconForTokenClassification,
            'zero-shot': FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ) -> None:
        """simple docstring"""
        self.model_tester = FalconModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FalconConfig , hidden_size=37 )
    def test_config( self ) -> None:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def lowercase__ ( self ) -> None:
        """simple docstring"""
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *inputs )
def lowercase__ ( self ) -> int:
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self ) -> int:
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict['''input_ids''']
        model = FalconForCausalLM(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , use_cache=True )
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values )
        standard_cache = model._convert_cache_to_standard_format(rw_cache , batch_size )
        for layer in range(len(rw_cache ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config , '''use_cache''' ):
                return
            model = model_class(config ).to(torch_device )
            if "use_cache" not in inputs:
                inputs['''use_cache'''] = True
            outputs = model(**inputs )
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config , '''decoder_layers''' , None )
                or getattr(config , '''num_decoder_layers''' , None )
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config , '''num_kv_heads''' , config.num_attention_heads )
            embed_dim = getattr(config , '''d_model''' , config.hidden_size )
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs['''past_key_values''']
            self.assertEqual(len(past_kv ) , num_hidden_layers )
            batch_size, seq_length = inputs['''input_ids'''].shape
            for i in range(num_hidden_layers ):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ) -> Any:
"""simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
        model = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
        model.eval()
        model.to(torch_device )
        inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(torch_device )
        EXPECTED_OUTPUT = (
            '''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
        )
        output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=19 )
        output_str = tokenizer.batch_decode(output_ids )[0]
        self.assertEqual(output_str , EXPECTED_OUTPUT )
@slow
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo )
            model = FalconForCausalLM.from_pretrained(repo )
            model.eval()
            model.to(torch_device )
            inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(torch_device )
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs , do_sample=False , max_new_tokens=4 )
            model.generate(**inputs , do_sample=True , max_new_tokens=4 )
            model.generate(**inputs , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
                tokenizer = AutoTokenizer.from_pretrained(repo )
                model = FalconForCausalLM.from_pretrained(repo )
                model.eval()
                model.to(device=torch_device )
                inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(torch_device )
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=False )
                outputs_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=True )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
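# Standalone sketch (ours, not part of the test suite) of the KV-cache shape rule
# the cache test above asserts: each layer stores a (key, value) pair of tensors
# shaped (batch_size, num_kv_heads, seq_length, head_dim), where head_dim is
# assumed to be hidden_size // num_attention_heads as in the test.
def expected_kv_shape(batch_size, num_kv_heads, seq_length, hidden_size, num_attention_heads):
    head_dim = hidden_size // num_attention_heads
    return (batch_size, num_kv_heads, seq_length, head_dim)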
| 701 |
'''simple docstring'''
def solution(n: int = 4_00_00_00) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
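# Optional cross-check (ours, not part of the original solution): every third
# Fibonacci number is even, so the even-valued terms obey
# E(k) = 4 * E(k-1) + E(k-2) with E(1) = 2, E(2) = 8, and the odd terms can be
# skipped entirely.
def solution_even_only(n: int = 4_00_00_00) -> int:
    total = 0
    a, b = 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total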
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 0 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
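def factorial_table(limit: int) -> list:
    """Illustration (ours, not part of the original solution) of what ``lru_cache``
    buys here: after one call, every smaller factorial is memoized, so building the
    whole table reuses cached values instead of recursing from scratch.

    >>> factorial_table(5)
    [1, 1, 2, 6, 24, 120]
    """
    return [factorial(num) for num in range(limit + 1)]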
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/pegasus-xsum": 5_1_2,
}
class PegasusTokenizerFast ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is'''
f''' {type(lowerCamelCase )}''' )
snake_case__ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case__ : List[Any] = additional_special_tokens_extended
else:
snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Union[str, Any] = vocab_file
snake_case__ : List[Any] = False if not self.vocab_file else True
    def _special_token_mask( self , seq ) -> List[int]:
        """simple docstring"""
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a )
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
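# Small illustration (ours, not part of the tokenizer) of the offset scheme used
# in __init__ above: with the default offset of 103, the tokenizer reserves
# <mask_1> plus <unk_2> ... <unk_102>, so sentencepiece ids can later be shifted
# by `offset` without colliding with special tokens.
def _demo_default_reserved_tokens(offset: int = 103) -> list:
    """
    >>> len(_demo_default_reserved_tokens())
    102
    >>> _demo_default_reserved_tokens()[:2], _demo_default_reserved_tokens()[-1]
    (['<mask_1>', '<unk_2>'], '<unk_102>')
    """
    return ["<mask_1>"] + [f"<unk_{i}>" for i in range(2, offset)]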
| 694 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"camembert-base": 5_1_2,
}
_lowerCAmelCase : str = "▁"
class CamembertTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=["<s>NOTUSED", "</s>NOTUSED"] , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[str] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , additional_special_tokens=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
snake_case__ : Tuple = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
snake_case__ : Optional[int] = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
snake_case__ : Union[str, Any] = len(self.fairseq_tokens_to_ids )
snake_case__ : Tuple = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
snake_case__ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
    def get_vocab( self ) -> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def __getstate__( self ) -> str:
"""simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
    def __setstate__( self , d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
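# Quick sanity sketch (ours, not part of the tokenizer) of the CamemBERT
# special-token layout built above: a single sequence becomes <s> A </s>, a pair
# becomes <s> A </s></s> B </s>. The ids 5 and 6 are hypothetical stand-ins for
# <s> and </s>.
def _demo_pair_layout() -> tuple:
    """
    >>> _demo_pair_layout()
    ([5, 10, 11, 6], [5, 10, 11, 6, 6, 12, 6])
    """
    cls_id, sep_id = 5, 6
    token_ids_a, token_ids_b = [10, 11], [12]
    single = [cls_id] + token_ids_a + [sep_id]
    pair = [cls_id] + token_ids_a + [sep_id, sep_id] + token_ids_b + [sep_id]
    return single, pair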
| 703 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed ( TransformedDistribution ):
"""simple docstring"""
    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0 ) -> None:
        """simple docstring"""
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
@property
    def mean( self ) -> torch.Tensor:
        """simple docstring"""
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ) -> torch.Tensor:
        """simple docstring"""
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ) -> torch.Tensor:
        """simple docstring"""
        return self.variance.sqrt()
class ParameterProjection ( nn.Module ):
"""simple docstring"""
    def __init__( self , in_features , args_dim , domain_map , **kwargs ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def lowercase__ ( self , x ) -> Tuple[torch.Tensor]:
        """simple docstring"""
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer ( nn.Module ):
    """simple docstring"""
    def __init__( self , function ) -> None:
        """simple docstring"""
        super().__init__()
        self.function = function
    def lowercase__ ( self , x , *args ) -> Any:
        """simple docstring"""
        return self.function(x , *args )
class DistributionOutput :
    """simple docstring"""
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim = 1 ) -> None:
        """simple docstring"""
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ) -> Distribution:
        """simple docstring"""
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc = None , scale = None , ) -> Distribution:
        """simple docstring"""
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
@property
    def event_shape( self ) -> Tuple:
        """simple docstring"""
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ) -> int:
        """simple docstring"""
        return len(self.event_shape )
    @property
    def value_in_support( self ) -> float:
        """simple docstring"""
        return 0.0
    def get_parameter_projection( self , in_features ) -> nn.Module:
        """simple docstring"""
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args ) -> Any:
        """simple docstring"""
        raise NotImplementedError()
@staticmethod
    def squareplus( x ) -> torch.Tensor:
        """simple docstring"""
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput ( DistributionOutput ):
    """simple docstring"""
    args_dim = {"df": 1, "loc": 1, "scale": 1}
    distribution_class = StudentT
@classmethod
    def domain_map( cls , df , loc , scale ) -> Tuple:
        """simple docstring"""
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput ( DistributionOutput ):
    """simple docstring"""
    args_dim = {"loc": 1, "scale": 1}
    distribution_class = Normal
@classmethod
    def domain_map( cls , loc , scale ) -> Tuple:
        """simple docstring"""
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput ( DistributionOutput ):
    """simple docstring"""
    args_dim = {"total_count": 1, "logits": 1}
    distribution_class = NegativeBinomial
@classmethod
    def domain_map( cls , total_count , logits ) -> Tuple:
        """simple docstring"""
        total_count = cls.squareplus(total_count )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args ) -> Distribution:
        """simple docstring"""
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc = None , scale = None ) -> Distribution:
        """simple docstring"""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
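# Self-contained sketch (ours, not part of the module) of the affine rescaling
# AffineTransformed implements above, written directly against
# torch.distributions: the mean maps to loc + scale * mean and the variance to
# scale**2 * variance.
def _demo_affine_rescaling() -> None:
    base = Normal(torch.zeros(3 ) , torch.ones(3 ) )
    scaled = TransformedDistribution(base , [AffineTransform(loc=2.0 , scale=5.0 )] )
    sample = scaled.sample()
    assert sample.shape == torch.Size([3] )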
| 694 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_lowerCAmelCase : Optional[Any] = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list ):
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
    new_doc = sorted(new_doc , key=lambda s: s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError(f'''{doc_list} has two \'overview\' docs which is not allowed.''' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite: bool = False ):
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['''sections''']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['''sections'''] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def check_pipeline_doc(overwrite: bool = False ):
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['''sections''']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['''section''']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['''section'''] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['''sections'''] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
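# Tiny worked example (ours, not part of the checker) of what ``clean_doc_toc``
# does: a duplicated entry with a single title is merged, entries are sorted
# alphabetically by title, and "Overview" is pinned first.
def _demo_clean_doc_toc():
    """
    >>> _demo_clean_doc_toc()
    [{'local': 'overview', 'title': 'Overview'}, {'local': 'a', 'title': 'Alpha'}, {'local': 'z', 'title': 'Zeta'}]
    """
    toy = [
        {"local": "z", "title": "Zeta"},
        {"local": "overview", "title": "Overview"},
        {"local": "a", "title": "Alpha"},
        {"local": "a", "title": "Alpha"},
    ]
    return clean_doc_toc(toy )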
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 704 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20 ) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
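# Cross-check (ours, not part of the original solution): Python's math.comb
# computes the same central binomial coefficient exactly, without the
# floating-point division above (comb requires Python 3.8+).
def solution_comb(n: int = 20 ) -> int:
    from math import comb
    return comb(2 * n , n )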
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
_lowerCAmelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class VideoMAEFeatureExtractor ( VideoMAEImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use VideoMAEImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
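# Generic sketch (ours, not part of this module) of the deprecation pattern used
# above: subclass the replacement class, warn once in __init__, and delegate
# everything else unchanged. `OldProcessor`/`NewProcessor` are hypothetical names.
class NewProcessor:
    def __init__(self, *args, **kwargs) -> None:
        pass
class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn('''OldProcessor is deprecated, use NewProcessor instead.''' , FutureWarning )
        super().__init__(*args, **kwargs)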
| 705 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ) -> dict:
        """simple docstring"""
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**kwargs )
        return config
    def lowercase__ ( self ) -> None:
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def lowercase__ ( self ) -> None:
        """simple docstring"""
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def lowercase__ ( self ) -> None:
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def lowercase__ ( self ) -> None:
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def lowercase__ ( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0_807 ) < 1E-2
        assert abs(result_mean.item() - 0.0_131 ) < 1E-3
    def lowercase__ ( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 0.0_002 ) < 1E-2
        assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
    def lowercase__ ( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0_807 ) < 1E-2
        assert abs(result_mean.item() - 0.0_131 ) < 1E-3
    def lowercase__ ( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
        assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
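# Condensed, standalone sketch (ours, not a test) of the sampling loop the tests
# above exercise: scale the sample, run a model, step the scheduler. A random
# tensor stands in for a real UNet's noise prediction.
def run_dummy_euler_loop(num_inference_steps: int = 10 ) -> torch.Tensor:
    scheduler = EulerDiscreteScheduler(beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' )
    scheduler.set_timesteps(num_inference_steps )
    generator = torch.manual_seed(0 )
    sample = torch.randn(1 , 3 , 8 , 8 ) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample , t )
        model_output = torch.randn_like(model_input )  # stand-in for a UNet
        sample = scheduler.step(model_output , t , sample , generator=generator ).prev_sample
    return sample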
| 694 | 0 |
'''simple docstring'''
import math
def _A ( snake_case__ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
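# The solution below counts primes on the corners of an odd-sided square spiral:
# each layer adds three non-square corner values (the Project Euler 58 setup).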
def _A ( ratio : float = 0.1 ):
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = ['pixel_values']
    def __init__( self , do_resize = True , size_divisor = 32 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ) -> None:
        """simple docstring"""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )
    def lowercase__ ( self , image , size_divisor , resample , data_format = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        height , width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
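        # e.g. with size_divisor=32, a 233x175 image is rounded down to 224x160 (7*32 by 5*32)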
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
return image
    def lowercase__ ( self , image , scale , data_format = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
    def lowercase__ ( self , images , do_resize = None , size_divisor = None , resample=None , do_rescale = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 694 | 0 |
'''simple docstring'''
from math import factorial
def _A ( snake_case__ : int = 1_00 ):
return sum(map(snake_case__ , str(factorial(snake_case__ ) ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 707 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( repo_id : str , path : str , revision : str ):
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}'''
| 694 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'dpt'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ) -> Tuple:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                backbone_config = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
return output
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _A ( voltage : float , current : float , power : float ):
snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' )
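    # Exactly one of voltage, current, power must be zero; it is recovered from the other two via P = V * I.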
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
'''simple docstring'''
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCAmelCase : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _A ( ):
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
return config
def _A ( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' , description=_lowerCAmelCase )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' , description=_lowerCAmelCase )
parser.add_argument(
        '''--config_file''' , default=None , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def _A ( args ):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
print(f'''accelerate configuration saved at {config_file}''' )
def _A ( ):
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 709 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , _lowerCAmelCase )
def _A ( tmp_path : str , case : str ):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
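    # Every case should report only the top-level `os` import, regardless of nesting or try/except handling.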
assert parsed_imports == ["os"]
| 694 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Dict = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'markuplm'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> str:
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 694 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
_lowerCAmelCase : int = input("Enter image url: ").strip()
print(F'''Downloading image from {url} ...''')
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
_lowerCAmelCase : Any = soup.find("meta", {"property": "og:image"})["content"]
_lowerCAmelCase : str = requests.get(image_url).content
_lowerCAmelCase : str = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, "wb") as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
| 711 |
'''simple docstring'''
def _A ( snake_case__ : float ):
return 10 - x * x
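# equation(x) = 10 - x**2 has roots at +/- sqrt(10), i.e. about +/- 3.16.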
def _A ( a : float , b : float ):
    # Bolzano's theorem: a sign change of the equation over [a, b] guarantees a root
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('''Wrong space!''' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 694 | 0 |
'''simple docstring'''
def _A ( denominations : list[int] , value : str ):
    total_value = int(value )
    # Initialize result
    answer = []
    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations ):
        # Take coins of this denomination while they still fit
        while total_value >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination ) # record each coin used
return answer
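# Note: the greedy choice is only optimal for canonical coin systems (like the INR set below).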
# Driver Code
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Dict = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
_lowerCAmelCase : List[Any] = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
_lowerCAmelCase : List[str] = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
_lowerCAmelCase : Dict = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
_lowerCAmelCase : List[Any] = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(F'''Following is minimal change for {value}: ''')
_lowerCAmelCase : Union[str, Any] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
| 712 |
'''simple docstring'''
from __future__ import annotations
def _A ( numsa : list[float] , numsb : list[float] ):
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
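    # Odd combined length: take the middle element; even: average the two middle values.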
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 694 | 0 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( repo_id : str , path : str , revision : str ):
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}'''
| 713 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (UniPCMultistepScheduler,)
_lowerCAmelCase = (('num_inference_steps', 2_5),)
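    # Every step() call in these tests receives num_inference_steps=25 via forward_default_kwargs.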
def lowercase__ ( self , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**lowerCamelCase )
return config
    def lowercase__ ( self , time_step=0 , **config ) -> int:
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(output , t , sample , **kwargs ).prev_sample
                new_output = new_scheduler.step(new_output , t , sample , **kwargs ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def lowercase__ ( self , time_step=0 , **forward_kwargs ) -> str:
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def lowercase__ ( self , scheduler=None , **config ) -> Optional[int]:
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
return sample
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
def lowercase__ ( self ) -> int:
"""simple docstring"""
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_464 ) < 1E-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_464 ) < 1E-3
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def lowercase__ ( self ) -> int:
"""simple docstring"""
        self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    sample = self.full_loop(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_464 ) < 1E-3
def lowercase__ ( self ) -> Any:
"""simple docstring"""
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.1_014 ) < 1E-3
def lowercase__ ( self ) -> int:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
def lowercase__ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**lowerCamelCase )
            scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 714 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'encoder-decoder'
_lowerCAmelCase = True
def __init__( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
return output
| 694 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_lowerCAmelCase = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def lowercase__ ( self , model , tokenizer , processor ) -> List[str]:
        """simple docstring"""
        audio_classifier = AudioClassificationPipeline(model=model , feature_extractor=processor )
        # test with a raw waveform
        audioa = np.zeros((34000,) )
        audio = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
    def lowercase__ ( self , audio_classifier , examples ) -> int:
        """simple docstring"""
        audioa , audio = examples
        output = audio_classifier(audio )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output , [
                {'''score''': ANY(float ), '''label''': ANY(str )},
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ] , )
        output = audio_classifier(audio , top_k=1 )
        self.assertEqual(
            output , [
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ] , )
        self.run_torchaudio(audio_classifier )
@require_torchaudio
    def lowercase__ ( self , audio_classifier ) -> Optional[int]:
"""simple docstring"""
import datasets
# test with a local file
        dataset = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        audio = dataset[0]['''audio''']['''array''']
        output = audio_classifier(audio )
        self.assertEqual(
            output , [
                {'''score''': ANY(float ), '''label''': ANY(str )},
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ] , )
@require_torch
def lowercase__ ( self ) -> int:
"""simple docstring"""
        model = '''anton-l/wav2vec2-random-tiny-classifier'''
        audio_classifier = pipeline('''audio-classification''' , model=model )
        audio = np.ones((8000,) )
        output = audio_classifier(audio , top_k=4 )
        EXPECTED_OUTPUT = [
            {'''score''': 0.0_842, '''label''': '''no'''},
            {'''score''': 0.0_838, '''label''': '''up'''},
            {'''score''': 0.0_837, '''label''': '''go'''},
            {'''score''': 0.0_834, '''label''': '''right'''},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'''score''': 0.0_845, '''label''': '''stop'''},
            {'''score''': 0.0_844, '''label''': '''on'''},
            {'''score''': 0.0_841, '''label''': '''right'''},
            {'''score''': 0.0_834, '''label''': '''left'''},
        ]
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        audio_dict = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict , top_k=4 )
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
import datasets
        model = '''superb/wav2vec2-base-superb-ks'''
        audio_classifier = pipeline('''audio-classification''' , model=model )
        dataset = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
        audio = np.array(dataset[3]['''speech'''] , dtype=np.float32 )
        output = audio_classifier(audio , top_k=4 )
        self.assertEqual(
            nested_simplify(output , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
pass
| 715 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
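    # Single sequence: <s> X </s>; pair of sequences: <s> A </s></s> B </s> (RoBERTa-style).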
    def lowercase__ ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def lowercase__ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def lowercase__ ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def lowercase__ ( self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def lowercase__ ( self , token ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
    def lowercase__ ( self , index ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
    def lowercase__ ( self , tokens ) -> str:
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace('''▁''' , ''' ''' ).strip()
return out_string
    def lowercase__ ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(token )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 694 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class snake_case :
"""simple docstring"""
_lowerCAmelCase = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
_lowerCAmelCase = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
_lowerCAmelCase = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
            train_extension = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class snake_case :
"""simple docstring"""
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
_lowerCAmelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def _A ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split('''.''' )[-1]
                test_extension = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['''test'''] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset('''csv''' , data_files=data_files , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset('''json''' , data_files=data_files , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets['''train'''].features['''label'''].names
    num_labels = len(label_list )
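    # For TabFact this yields two labels (mapped to Refused/Entailed below).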
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
        padding = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'''Refused''': 0, '''Entailed''': 1}
    model.config.id2label = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
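        # Each table_text row is '#'-separated; the first row holds the column headers.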
        questions = examples['''statement''']
        tables = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['''label'''] = examples['''label''']
        return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
snake_case__ : Dict = raw_datasets.map(
snake_case__ , batched=snake_case__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
snake_case__ : str = raw_datasets['''train''']
if data_args.max_train_samples is not None:
snake_case__ : Optional[Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
snake_case__ : Any = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
snake_case__ : Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
snake_case__ : Dict = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
snake_case__ : int = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(snake_case__ ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(snake_case__ : EvalPrediction ):
snake_case__ : Tuple = p.predictions[0] if isinstance(p.predictions , snake_case__ ) else p.predictions
snake_case__ : Any = np.argmax(snake_case__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case__ : Tuple = default_data_collator
elif training_args.fpaa:
snake_case__ : Optional[Any] = DataCollatorWithPadding(snake_case__ , pad_to_multiple_of=8 )
else:
snake_case__ : Optional[Any] = None
# Initialize our Trainer
snake_case__ : Tuple = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=snake_case__ , tokenizer=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
snake_case__ : Dict = None
if training_args.resume_from_checkpoint is not None:
snake_case__ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case__ : Tuple = last_checkpoint
snake_case__ : List[Any] = trainer.train(resume_from_checkpoint=snake_case__ )
snake_case__ : Any = train_result.metrics
snake_case__ : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ )
)
snake_case__ : Optional[int] = min(snake_case__ , len(snake_case__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , snake_case__ )
trainer.save_metrics('''train''' , snake_case__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case__ : List[str] = trainer.evaluate(eval_dataset=snake_case__ )
snake_case__ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case__ )
snake_case__ : List[str] = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics('''eval''' , snake_case__ )
trainer.save_metrics('''eval''' , snake_case__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
snake_case__ : Any = predict_dataset.remove_columns('''label''' )
snake_case__ : Dict = trainer.predict(snake_case__ , metric_key_prefix='''predict''' ).predictions
snake_case__ : List[str] = np.argmax(snake_case__ , axis=1 )
snake_case__ : Any = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(snake_case__ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(snake_case__ ):
snake_case__ : Dict = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
snake_case__ : Optional[Any] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
def _A ( snake_case__ : Tuple ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
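# --- Hedged usage sketch (not part of the training script above) ---
# It illustrates what the TapexTokenizer call inside `preprocess_tabfact_function` does:
# a pandas table and a statement are linearized into one token sequence. The checkpoint
# name and table contents are illustrative assumptions, not values from the script.
import pandas as pd
from transformers import TapexTokenizer

tapex_tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")  # assumed checkpoint
demo_table = pd.DataFrame.from_records([["munich", "1972"]], columns=["city", "year"])
demo_encoding = tapex_tokenizer(demo_table, "munich hosted the olympics in 1972", truncation=True)
print(tapex_tokenizer.decode(demo_encoding["input_ids"]))  # one sequence holding the statement and the linearized table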
| 716 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowerCAmelCase : Optional[int] = parser.parse_args()
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model"
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase : Optional[Any] = pipe.to(device)
# to channels last
_lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
_lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last)
_lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4)
_lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9
_lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8)
_lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowerCAmelCase : Tuple = 6_6_6
_lowerCAmelCase : str = torch.Generator(device).manual_seed(seed)
_lowerCAmelCase : Dict = {"generator": generator}
if args.steps is not None:
_lowerCAmelCase : Tuple = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
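# --- Hedged sketch of the ipex.optimize pattern used above, on a toy module ---
# so it can run without a trained pipeline. The module and tensor shapes are made up.
import torch
import intel_extension_for_pytorch as ipex

toy_model = torch.nn.Sequential(torch.nn.Linear(768, 768), torch.nn.GELU()).eval()
toy_model = ipex.optimize(toy_model, dtype=torch.bfloat16, inplace=True)  # same call pattern as above
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16), torch.no_grad():
    toy_out = toy_model(torch.randn(2, 768))
print(toy_out.shape)  # torch.Size([2, 768])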
| 694 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 4_2
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
snake_case__ : str = timestep
if not torch.is_tensor(lowerCamelCase ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[Any] = timesteps[None].to(sample.device )
snake_case__ : Any = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
snake_case__ : Tuple = self.time_mlp(lowerCamelCase )
else:
snake_case__ : Union[str, Any] = timestep_embed[..., None]
snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case__ : List[Any] = ()
for downsample_block in self.down_blocks:
            snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case__ : str = down_block_res_samples[-1:]
snake_case__ : int = down_block_res_samples[:-1]
snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase )
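# --- Hedged sketch of the timestep handling at the top of `forward` above ---
# A bare Python int is promoted to a 1-element tensor (and a 0-d tensor gains a batch
# dimension) before the Fourier/positional projection; the shapes here are illustrative.
import torch

demo_sample = torch.randn(4, 2, 32)  # (batch, channels, length)
demo_timestep = 10                   # a caller may pass a plain scalar
if not torch.is_tensor(demo_timestep):
    demo_timestep = torch.tensor([demo_timestep], dtype=torch.long, device=demo_sample.device)
elif len(demo_timestep.shape) == 0:
    demo_timestep = demo_timestep[None].to(demo_sample.device)
print(demo_timestep.shape)  # torch.Size([1]) -- ready for the time projection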
| 717 |
'''simple docstring'''
import socket
def _A ( ):
snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case__ : str = socket.gethostname()
snake_case__ : Union[str, Any] = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
snake_case__ : int = sock.recv(10_24 )
if not data:
break
out_file.write(snake_case__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
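# --- Hedged sketch of a matching server for the client above ---
# It accepts one connection on the client's hard-coded port 12312, prints the greeting,
# and streams a file back in 1024-byte chunks. The filename is an assumption.
import socket

def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.bind((socket.gethostname(), port))
        server.listen(1)
        conn, _ = server.accept()
        with conn, open(filename, "rb") as in_file:
            print(conn.recv(1024).decode())  # "Hello server!"
            while chunk := in_file.read(1024):
                conn.send(chunk)
    print("File sent, connection closed")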
| 694 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , ) -> Any:
"""simple docstring"""
snake_case__ : str = size if size is not None else {'''height''': 18, '''width''': 18}
snake_case__ : Tuple = parent
snake_case__ : int = batch_size
snake_case__ : Optional[Any] = num_channels
snake_case__ : Tuple = image_size
snake_case__ : int = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : Any = size
snake_case__ : Optional[Any] = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : int = image_std
def lowercase__ ( self ) -> str:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = DPTImageProcessor if is_vision_available() else None
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = DPTImageProcessingTester(self )
@property
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''size''' ) )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
snake_case__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
snake_case__ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case__ : Dict = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case__ : str = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case__ : Tuple = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
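# --- Hedged usage sketch for the processor exercised above ---
# Resize + normalize a single RGB image into a pixel_values batch. The 18x18 target
# mirrors the tester defaults; constructing the processor without a checkpoint is an
# assumption made for a quick local run.
import numpy as np
from transformers import DPTImageProcessor

demo_processor = DPTImageProcessor(size={"height": 18, "width": 18})
demo_image = np.random.randint(0, 256, size=(30, 40, 3), dtype=np.uint8)  # H, W, C
demo_pixels = demo_processor(demo_image, return_tensors="pt").pixel_values
print(demo_pixels.shape)  # torch.Size([1, 3, 18, 18])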
| 718 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
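# --- Hedged numeric check of the three formulas above ---
# The defs in this file are all bound to the same name, so the values are computed
# inline with made-up inputs: a 1000 principal throughout.
principal, daily_rate, days = 1000.0, 0.005, 10
print(principal * daily_rate * days)  # simple interest: 50.0
rate, periods = 0.05, 3
print(principal * ((1 + rate) ** periods - 1))  # compound interest: 157.625
apr, years = 0.05, 1
print(principal * ((1 + apr / 365) ** (years * 365) - 1))  # APR variant: ~51.27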
| 694 | 0 |
'''simple docstring'''
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Any = name
snake_case__ : Any = value
snake_case__ : Union[str, Any] = weight
def __repr__( self ) -> Dict:
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def lowercase__ ( self ) -> Any:
"""simple docstring"""
return self.value
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return self.name
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
return self.weight
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return self.value / self.weight
def _A ( snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ):
snake_case__ : int = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def _A ( snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : str ):
snake_case__ : Dict = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
snake_case__ : Tuple = []
    snake_case__ ,snake_case__ : Optional[Any] = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def _A ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
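# --- Hedged, self-contained restatement of the greedy routine above ---
# The class and helper names in this file are rewritten, so plain tuples stand in for
# Things: sort by the value/weight ratio, then take items while they fit under max_cost.
items = [("Burger", 80, 40), ("Pizza", 100, 10), ("Coca Cola", 60, 20), ("Rice", 70, 70)]
max_cost = 30
chosen, total_value, total_cost = [], 0.0, 0.0
for name, value, weight in sorted(items, key=lambda it: it[1] / it[2], reverse=True):
    if total_cost + weight <= max_cost:
        chosen.append(name)
        total_cost += weight
        total_value += value
print(chosen, total_value)  # ['Pizza', 'Coca Cola'] 160.0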
| 719 |
'''simple docstring'''
from math import isqrt
def _A ( snake_case__ : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def _A ( snake_case__ : int = 10**6 ):
snake_case__ : str = 0
snake_case__ : List[str] = 1
snake_case__ : str = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
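# --- Hedged worked check of the candidate sequence above ---
# Candidates are differences of consecutive cubes: (k + 1)**3 - k**3 = 3k**2 + 3k + 1,
# so consecutive candidates differ by 6 * (k + 1) -- exactly the `+= 6 * cube_index` step.
candidates, k, p = [], 1, 7
while len(candidates) < 5:
    candidates.append(p)
    k += 1
    p += 6 * k
print(candidates)  # [7, 19, 37, 61, 91] == [(k + 1) ** 3 - k ** 3 for k in range(1, 6)]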
| 694 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Tuple = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
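# --- Hedged sketch of what the _LazyModule pattern above buys ---
# The package object transformers installs here defers the heavy import until first
# attribute access; assumes a transformers version that ships FocalNet.
import transformers.models.focalnet as focalnet_pkg

print(type(focalnet_pkg).__name__)        # '_LazyModule' -- nothing heavy imported yet
config_cls = focalnet_pkg.FocalNetConfig  # the deferred import happens here
print(config_cls.model_type)              # 'focalnet'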
| 720 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = fa_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase )
return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
| 694 | 0 |
'''simple docstring'''
from random import randint, random
def _A ( snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : int = 5 , ):
snake_case__ : Optional[Any] = [[-1] * number_of_cells] # Create a highway without any car
snake_case__ : Optional[int] = 0
snake_case__ : List[Any] = max(snake_case__ , 0 )
while i < number_of_cells:
snake_case__ : Optional[int] = (
randint(0 , snake_case__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _A ( snake_case__ : list , snake_case__ : int ):
snake_case__ : List[str] = 0
snake_case__ : str = highway_now[car_index + 1 :]
for cell in range(len(snake_case__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(snake_case__ , -1 )
def _A ( snake_case__ : list , snake_case__ : float , snake_case__ : int ):
snake_case__ : int = len(snake_case__ )
# Beforce calculations, the highway is empty
snake_case__ : Any = [-1] * number_of_cells
for car_index in range(snake_case__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
snake_case__ : Optional[Any] = min(highway_now[car_index] + 1 , snake_case__ )
# Number of empty cell before the next car
snake_case__ : int = get_distance(snake_case__ , snake_case__ ) - 1
# We can't have the car causing an accident
snake_case__ : Tuple = min(next_highway[car_index] , snake_case__ )
if random() < probability:
# Randomly, a driver will slow down
snake_case__ : Optional[Any] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _A ( snake_case__ : list , snake_case__ : int , snake_case__ : float , snake_case__ : int ):
snake_case__ : Union[str, Any] = len(highway[0] )
for i in range(snake_case__ ):
snake_case__ : Any = update(highway[i] , snake_case__ , snake_case__ )
snake_case__ : str = [-1] * number_of_cells
for car_index in range(snake_case__ ):
snake_case__ : Optional[Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
snake_case__ : Optional[Any] = (car_index + speed) % number_of_cells
# Commit the change of position
snake_case__ : Dict = speed
highway.append(snake_case__ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
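# --- Hedged, self-contained run of one update step matching the logic above ---
# Accelerate by one up to max_speed, then brake so at least one empty cell stays before
# the next car; randomization is omitted so the result is deterministic. The 10-cell
# highway is made up.
highway_now = [1, -1, -1, 3, -1, -1, -1, -1, 2, -1]
n, max_speed = len(highway_now), 5

def empties_ahead(i: int) -> int:  # empty cells before the next car, wrapping around
    return next(d for d in range(1, n + 1) if highway_now[(i + d) % n] != -1) - 1

next_speeds = [-1] * n
for i, speed in enumerate(highway_now):
    if speed != -1:
        next_speeds[i] = min(min(speed + 1, max_speed), empties_ahead(i) - 1)
next_highway = [-1] * n
for i, speed in enumerate(next_speeds):
    if speed != -1:
        next_highway[(i + speed) % n] = speed
print(next_highway)  # [-1, 1, -1, -1, -1, -1, 3, -1, 0, -1]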
| 721 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 42
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
snake_case__ : str = timestep
if not torch.is_tensor(lowerCamelCase ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[Any] = timesteps[None].to(sample.device )
snake_case__ : Any = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
snake_case__ : Tuple = self.time_mlp(lowerCamelCase )
else:
snake_case__ : Union[str, Any] = timestep_embed[..., None]
snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case__ : List[Any] = ()
for downsample_block in self.down_blocks:
snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case__ : str = down_block_res_samples[-1:]
snake_case__ : int = down_block_res_samples[:-1]
snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase )
| 694 | 0 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[int] = 0
@slow
def lowercase__ ( self ) -> int:
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
snake_case__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowerCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowerCamelCase ) , 0 )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Dict = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
# Check that tokenizer_type ≠ model_type
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase , config=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase , '''vocab.txt''' ) )
snake_case__ : int = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''bert''' , use_fast=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase , '''merges.txt''' ) )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
@require_tokenizers
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase , '''vocab.txt''' ) )
snake_case__ : str = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''bert''' )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase , '''merges.txt''' ) )
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
with pytest.raises(lowerCamelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase__ ( self ) -> Any:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
snake_case__ : List[str] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCamelCase , lowerCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowerCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase__ ( self ) -> Any:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
snake_case__ : Tuple = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : List[Any] = TOKENIZER_MAPPING.values()
snake_case__ : Dict = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCamelCase )
@require_tokenizers
def lowercase__ ( self ) -> Any:
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase ) , lowerCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , lowerCamelCase )
@require_tokenizers
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Any = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=lowerCamelCase )
snake_case__ : List[Any] = '''Hello, world. How are you?'''
snake_case__ : Dict = tokenizer.tokenize(lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
snake_case__ : Dict = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=lowerCamelCase )
snake_case__ : int = tokenizer.tokenize(lowerCamelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : str = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = get_tokenizer_config('''bert-base-cased''' )
snake_case__ : Dict = config.pop('''_commit_hash''' , lowerCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCamelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
snake_case__ : int = get_tokenizer_config(lowerCamelCase )
self.assertDictEqual(lowerCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : Dict = get_tokenizer_config(lowerCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , lowerCamelCase )
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
snake_case__ : Dict = CustomTokenizer.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : str = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , lowerCamelCase )
# Can register in two steps
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCamelCase , slow_tokenizer_class=lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoTokenizer.register(lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Tuple = BertTokenizerFast.from_pretrained(lowerCamelCase )
bert_tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : List[Any] = CustomTokenizerFast.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase , use_fast=lowerCamelCase )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(lowerCamelCase ):
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase ):
snake_case__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
snake_case__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase , trust_remote_code=lowerCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
snake_case__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase )
snake_case__ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = False
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = NewTokenizer
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , lowerCamelCase )
AutoTokenizer.register(lowerCamelCase , slow_tokenizer_class=lowerCamelCase )
AutoTokenizer.register(lowerCamelCase , fast_tokenizer_class=lowerCamelCase )
# If remote code is not set, the default is to use local
snake_case__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
snake_case__ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
snake_case__ : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
snake_case__ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase , use_fast=lowerCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
snake_case__ : Dict = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase__ ( self ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase , revision='''aaaaaa''' )
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
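# A minimal, self-contained sketch of the registration API exercised by the tests
# above. `MyConfig` and `MyTokenizer` are hypothetical stand-ins; `AutoConfig.register`
# and `AutoTokenizer.register` are the real transformers entry points.
from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyTokenizer(BertTokenizer):
    pass

AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)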
| 700 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype="float16", ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
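# A hypothetical invocation of the script above, followed by loading the converted
# model back in; the local paths are placeholders, everything else is real
# transformers API (Bort reuses the RoBERTa vocabulary, hence RobertaTokenizer):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch
#
#   from transformers import BertModel, RobertaTokenizer
#   tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#   model = BertModel.from_pretrained("./bort-pytorch")
#   outputs = model(**tokenizer("Hello Bort!", return_tensors="pt"))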
| 694 | 0 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive integers that are both n digits long and an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
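# Added closed-form cross-check (not part of the original solution): base**power
# has `power` digits iff 10**(power - 1) <= base**power, i.e. power <= 1 / (1 -
# log10(base)); bases >= 10 can never qualify, so only bases 1..9 contribute.
def solution_closed_form(max_base: int = 10) -> int:
    from math import floor, log10

    return sum(floor(1 / (1 - log10(base))) for base in range(1, max_base))

if __name__ == "__main__":
    assert solution_closed_form() == solution() == 49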
| 701 |
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
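# Added constant-space cross-check: every third Fibonacci number is even, and the
# even ones satisfy e_k = 4*e_{k-1} + e_{k-2} with e_1 = 2, e_2 = 8, so the list
# above is not needed at all.
def solution_even_recurrence(limit: int = 4_000_000) -> int:
    total, prev, curr = 0, 2, 8
    if limit >= 2:
        total += 2
    while curr <= limit:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total

if __name__ == "__main__":
    assert solution_even_recurrence() == solution() == 4_613_732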
| 694 | 0 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = True
snake_case__ : Optional[Any] = getattr(self.model_tester , '''seq_length''' , lowerCamelCase )
snake_case__ : Tuple = getattr(self.model_tester , '''decoder_seq_length''' , lowerCamelCase )
snake_case__ : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , lowerCamelCase )
snake_case__ : str = getattr(self.model_tester , '''d_model''' , lowerCamelCase )
snake_case__ : Optional[int] = getattr(self.model_tester , '''num_attention_heads''' , lowerCamelCase )
snake_case__ : str = d_model // num_attention_heads
for model_class in self.all_model_classes:
snake_case__ : List[str] = True
snake_case__ : int = False
snake_case__ : Optional[Any] = True
snake_case__ : Tuple = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ : Any = True
snake_case__ : str = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Optional[int] = outputs.encoder_attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
snake_case__ : Union[str, Any] = len(lowerCamelCase )
snake_case__ : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCamelCase , lowerCamelCase )
# decoder attentions
snake_case__ : Tuple = outputs.decoder_attentions
self.assertIsInstance(lowerCamelCase , (list, tuple) )
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
snake_case__ : Dict = outputs.cross_attentions
self.assertIsInstance(lowerCamelCase , (list, tuple) )
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
snake_case__ : int = True
snake_case__ : str = True
snake_case__ : Optional[Any] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(out_len + 2 , len(lowerCamelCase ) )
snake_case__ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
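# A standalone sketch of the forecasting flow exercised above, using the real
# transformers / huggingface_hub APIs; guarded so it only runs (and downloads a
# model plus a dataset batch) when this file is executed directly.
if __name__ == "__main__":
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            static_categorical_features=batch["static_categorical_features"],
            past_time_features=batch["past_time_features"],
            past_values=batch["past_values"],
            future_time_features=batch["future_time_features"],
            past_observed_mask=batch["past_observed_mask"],
        )
    # Average over the sampled trajectories to get one point forecast per series.
    print(outputs.sequences.mean(dim=1).shape)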
| 702 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
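# Quick usage sketch (an addition; requires network access to download the
# pretrained files): the fast tokenizer above appends </s> to every sequence
# via build_inputs_with_special_tokens.
if __name__ == "__main__":
    tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
    ids = tok("PEGASUS is pre-trained by generating gap sentences.").input_ids
    assert ids[-1] == tok.eos_token_id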
| 694 | 0 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
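    # Independent cross-check with numpy (an extra dependency, not used by the
    # solver above); np.linalg.solve should agree with solve_simultaneous(eq).
    import numpy as np

    coefficients = np.array([row[:-1] for row in eq], dtype=float)
    constants = np.array([row[-1] for row in eq], dtype=float)
    print(np.linalg.solve(coefficients, constants))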
| 703 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])
    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map
    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function
    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)
    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)
    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim(self) -> int:
        return len(self.event_shape)
    @property
    def value_in_support(self) -> float:
        return 0.0
    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )
    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()
    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)
    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)
    # We cannot scale the negative binomial with an affine transform, since it must
    # return integers; instead we scale its parameters.
    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
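# Minimal usage sketch of the classes above (an addition): project features to
# Student-T parameters and score targets under the resulting distribution.
if __name__ == "__main__":
    student_t = StudentTOutput(dim=1)
    projection = student_t.get_parameter_projection(in_features=32)
    features = torch.randn(8, 32)              # (batch, in_features)
    df, loc, scale = projection(features)      # domain-mapped parameter tuple
    distribution = student_t.distribution((df, loc, scale))
    nll = -distribution.log_prob(torch.randn(8))
    print(nll.shape)                           # torch.Size([8])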
| 694 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)
    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
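# Standalone usage sketch of the tool under test (an addition; downloads a QA
# model on first run, so it is guarded behind __main__):
if __name__ == "__main__":
    qa_tool = load_tool("text-question-answering")
    print(qa_tool(TEXT, "When was Hugging Face founded?"))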
| 704 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 0 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # one extra arrangement per block length: the block that touches the
            # right-hand end of the row (no trailing separator needed)
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
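# Brute-force cross-check (an addition; exponential, so small lengths only):
# enumerate every red/grey colouring and keep rows whose red runs are all >= 3.
def brute_force(length: int) -> int:
    from itertools import product

    valid = 0
    for row in product("RG", repeat=length):
        runs = "".join(row).split("G")
        if all(len(run) == 0 or len(run) >= 3 for run in runs):
            valid += 1
    return valid

if __name__ == "__main__":
    assert all(solution(n) == brute_force(n) for n in range(1, 15))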
| 705 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
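# A standalone sketch of the sampling loop these tests drive, with a trivial
# stand-in for the UNet (a real pipeline would call model(sample, t).sample);
# everything else is the real diffusers scheduler API.
if __name__ == "__main__":
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in prediction
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
    print(sample.abs().mean())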
| 694 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    imagea = Image.open(dataset[4]['''file'''] )
    imageb = Image.open(dataset[5]['''file'''] )
    images = [imagea, imageb]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_image( self ) -> List[Any]:
        """simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
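# Illustrative sketch (not part of the test suite above): the core of the
# ImageGPT preprocessing exercised by these tests is nearest-cluster color
# quantization. `nearest_cluster_ids` is a hypothetical helper for clarity
# only; the real logic lives inside ImageGPTImageProcessor.
def nearest_cluster_ids(pixels, clusters):
    # pixels: (num_pixels, 3) normalized to [-1, 1]; clusters: (num_clusters, 3).
    # Each pixel maps to the index of the closest cluster by squared distance.
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
    return distances.argmin(axis=-1)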
| 706 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class GLPNImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize = True , size_divisor = 32 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ) -> None:
        """simple docstring"""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )
    def resize( self , image , size_divisor , resample , data_format = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        height, width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image
    def rescale( self , image , scale , data_format = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , do_rescale = None , size_divisor=None , resample = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
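# Quick sanity check of the rounding used in resize() above: height and width
# are floored to the nearest multiple of size_divisor, so a 193x257 image with
# size_divisor=32 is resized to 192x256.
# >>> (193 // 32) * 32, (257 // 32) * 32
# (192, 256)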
| 694 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase( unittest.TestCase ):
    """simple docstring"""
    def _get_tensors( self , length ) -> Dict:
        """simple docstring"""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ) -> List[Any]:
        """simple docstring"""
        input_ids, scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ) -> Dict:
        """simple docstring"""
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ) -> Optional[Any]:
        """simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ) -> int:
        """simple docstring"""
        input_ids, scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ) -> str:
        """simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
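# Hedged usage sketch: how a StoppingCriteriaList is typically consumed by a
# hand-rolled decoding loop. `next_token` is a placeholder; the real
# `generate()` implementation additionally handles caching and score tracking.
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=1.0)])
# while not criteria(input_ids, scores):
#     input_ids = torch.cat([input_ids, next_token(input_ids)], dim=-1)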
| 707 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}'''
| 694 | 0 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag to check if a key is an arrow key
KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 2_7,
"up": 6_5 + ARROW_KEY_FLAG,
"down": 6_6 + ARROW_KEY_FLAG,
"right": 6_7 + ARROW_KEY_FLAG,
"left": 6_8 + ARROW_KEY_FLAG,
"mod_int": 9_1,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 5_0,
"delete": 5_1,
"pg_up": 5_3,
"pg_down": 5_4,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = '''mbcs'''
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_26 ) )
                    ch = chr(KEYMAP['''esc'''] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
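# Minimal interactive sketch built on the helpers above (assumed usage, must
# run in a real terminal): get_character() returns a printable character, an
# arrow key encoded as chr(code + ARROW_KEY_FLAG), or KEYMAP["undefined"].
def wait_for_arrow_key():
    while True:
        char = get_character()
        if char == KEYMAP["undefined"]:
            continue  # unrecognized escape sequence
        code = ord(char )
        if code == KEYMAP["interrupt"]:
            raise KeyboardInterrupt
        if code in (KEYMAP["up"], KEYMAP["down"], KEYMAP["left"], KEYMAP["right"]):
            return code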
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
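# Worked examples: exactly one argument is 0 and is solved for from the other
# two via P = V * I.
# electric_power(voltage=0, current=2, power=5) -> result(name='voltage', value=2.5)
# electric_power(voltage=2, current=2, power=0) -> result(name='power', value=4.0)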
| 694 | 0 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """simple docstring"""
    def __init__( self , order ) -> None:
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs , b_coeffs ) -> None:
        """simple docstring"""
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_message = (
                f'''Expected a_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(a_coeffs )}'''
            )
            raise ValueError(error_message )
        if len(b_coeffs ) != self.order + 1:
            error_message = (
                f'''Expected b_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(b_coeffs )}'''
            )
            raise ValueError(error_message )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample ) -> float:
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the histories and record the newest input/output samples.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
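# Hedged usage sketch: wiring the filter above as a second-order section. The
# coefficient values are illustrative placeholders (a stable double pole at
# z = 0.9), not a filter designed for a particular cutoff frequency.
# filt = IIRFilter(2)
# filt.set_coefficients([1.0, -1.8, 0.81], [0.05, 0.1, 0.05])
# output = [filt.process(sample) for sample in (0.0, 1.0, 1.0, 1.0)]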
| 709 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n import os\n return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 694 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str ):
    with open(path , '''rb''' ) as f:
        im = Image.open(f )
        return im.convert('''RGB''' )
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    dataset_name: Optional[str] = field(
        default=None , metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self ) -> int:
        """simple docstring"""
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                '''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn(examples ):
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    labels = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''' , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case__ : Any = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
snake_case__ : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
snake_case__ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
snake_case__ : List[str] = {}
if data_args.train_dir is not None:
snake_case__ : List[str] = os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
snake_case__ : Dict = os.path.join(data_args.validation_dir , '''**''' )
snake_case__ : Tuple = load_dataset(
'''imagefolder''' , data_files=snake_case__ , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
snake_case__ : List[str] = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0:
snake_case__ : Optional[Any] = dataset['''train'''].train_test_split(data_args.train_val_split )
snake_case__ : Dict = split['''train''']
snake_case__ : Optional[Any] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
snake_case__ : int = dataset['''train'''].features['''labels'''].names
snake_case__ : Tuple = {}, {}
for i, label in enumerate(snake_case__ ):
snake_case__ : Union[str, Any] = str(snake_case__ )
snake_case__ : str = label
# Load the accuracy metric from the datasets package
snake_case__ : Optional[int] = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(snake_case__ : List[str] ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
snake_case__ : List[str] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case__ ) , labelaid=snake_case__ , idalabel=snake_case__ , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case__ : Any = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
snake_case__ : str = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case__ : Optional[Any] = image_processor.size['''shortest_edge''']
else:
snake_case__ : Optional[int] = (image_processor.size['''height'''], image_processor.size['''width'''])
snake_case__ : Optional[int] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
snake_case__ : Any = Compose(
[
RandomResizedCrop(snake_case__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case__ : Optional[Any] = Compose(
[
Resize(snake_case__ ),
CenterCrop(snake_case__ ),
ToTensor(),
normalize,
] )
def train_transforms(snake_case__ : Tuple ):
snake_case__ : Optional[Any] = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(snake_case__ : Any ):
snake_case__ : Any = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
snake_case__ : Optional[int] = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(snake_case__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
snake_case__ : Optional[Any] = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(snake_case__ )
# Initalize our trainer
snake_case__ : Optional[Any] = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=snake_case__ , tokenizer=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
snake_case__ : List[str] = None
if training_args.resume_from_checkpoint is not None:
snake_case__ : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case__ : int = last_checkpoint
snake_case__ : Optional[Any] = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case__ : Optional[int] = trainer.evaluate()
trainer.log_metrics('''eval''' , snake_case__ )
trainer.save_metrics('''eval''' , snake_case__ )
# Write model card and (optionally) push to hub
snake_case__ : Any = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
if __name__ == "__main__":
main()
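# Example invocation (hypothetical dataset and output paths; the flags map
# onto the DataTrainingArguments / ModelArguments / TrainingArguments fields
# defined above):
# python run_image_classification.py \
#     --dataset_name beans \
#     --output_dir ./vit-beans \
#     --do_train --do_eval \
#     --per_device_train_batch_size 8 \
#     --num_train_epochs 3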
| 710 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'markuplm'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> str:
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 694 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_for_image_classification( self ) -> Union[str, Any]:
        """simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset('''nielsr/rvlcdip-demo''' )
        image = dataset['''train'''][0]['''image'''].convert('''RGB''' )
        inputs = image_processor(image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
| 711 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a sign change on [a, b] guarantees a root in between
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('''Wrong space!''' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
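# Quick check: 10 - x*x has a positive root at sqrt(10) ~= 3.16228, so both
# calls above should print a value within the 0.01 tolerance of it:
# assert abs(bisection(-2, 5) - 10 ** 0.5) < 0.01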
| 694 | 0 |
'''simple docstring'''
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(R"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(R"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(R"(_{2,})")
_split_re = R"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = R"<>:/\|?*"
def camelcase_to_snakecase(name: str) -> str:
    name = _uppercase_uppercase_re.sub(R'''\1_\2''' , name )
    name = _lowercase_uppercase_re.sub(R'''\1_\2''' , name )
    return name.lower()
def snakecase_to_camelcase(name: str) -> str:
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '''''' )
def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name ) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name ) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re , split ):
        raise ValueError(f'''Split name should match '{_split_re}' but got '{split}'.''' )
    return f'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None ):
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir , prefix )
    return f'''{filepath}*'''
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None ):
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f'''.{filetype_suffix}'''
        return [filename]
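# Usage examples for the helpers above:
# camelcase_to_snakecase("SomeDatasetName")             -> "some_dataset_name"
# snakecase_to_camelcase("some_dataset_name")           -> "SomeDatasetName"
# filename_prefix_for_split("SomeDatasetName", "train") -> "some_dataset_name-train"
# filenames_for_dataset_split("/data", "SomeDatasetName", "train", "arrow", [10, 10])
#   -> ['/data/some_dataset_name-train-00000-of-00002.arrow',
#       '/data/some_dataset_name-train-00001-of-00002.arrow']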
| 712 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    all_numbers = sorted(numsa + numsb )
    div, mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_b)}''')
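# Worked examples:
# median_of_two_arrays([1, 3], [2])    -> 2    (sorted [1, 2, 3], odd length)
# median_of_two_arrays([1, 2], [3, 4]) -> 2.5  (mean of the two middle values)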
| 694 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__ ).parent.joinpath(filename ).read_text(encoding='''utf-8''' )
    ciphertext = [int(number ) for number in data.strip().split(''',''' )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
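# The solver above relies on XOR being its own inverse: applying the same key
# byte twice restores the plaintext byte, so trying all three-letter lowercase
# keys and keeping only all-printable decodings recovers the message.
# >>> (ord("a") ^ 107) ^ 107 == ord("a")
# True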
| 713 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 0 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 714 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'encoder-decoder'
    is_composition = True
    def __init__( self , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        """simple docstring"""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ) -> Union[str, Any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
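# Hedged usage sketch mirroring from_encoder_decoder_configs() above (the
# chosen model types are illustrative; any two AutoConfig-compatible configs
# work):
# from transformers import BertConfig, GPT2Config
# config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
# assert config.decoder.is_decoder and config.decoder.add_cross_attention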
| 694 | 0 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n import os\n return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 715 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 694 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    """simple docstring"""
    def __init__( self , num_of_nodes ) -> None:
        """simple docstring"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge( self , u_node , v_node , weight ) -> None:
        """simple docstring"""
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node ) -> int:
        """simple docstring"""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node ) -> None:
        """simple docstring"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size , u_node , v_node ) -> None:
        """simple docstring"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ) -> None:
        """simple docstring"""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def _A ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
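# Small usage example for the Graph class above (weights illustrative):
# g = Graph(4)
# for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]:
#     g.add_edge(u, v, w)
# g.boruvka()  # prints the three added edges and a total MST weight of 6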
| 716 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 6_4, 6_4)
timestep = torch.rand(1) * 9_9_9
encoder_hidden_status = torch.randn(2, 7_7, 7_6_8)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 6_6_6
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
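# Quick check: y' = y with y(0) = 1 approximates e^x; with step 0.01 on [0, 1]
# the endpoint is about 2.7048 versus e ~= 2.71828 (explicit Euler is only
# first-order accurate, so halving the step roughly halves the error).
# >>> explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
# 2.7048...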
| 717 |
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_23_12
    sock.connect((host, port) )
    sock.send(B'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(10_24 )
            if not data:
                break
            out_file.write(data )
    print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )
if __name__ == "__main__":
main()
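# A minimal matching sender, sketched as comments (the host, port 12312 and the
# greeting mirror the receiver above; the file name is an assumption):
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _ = server.accept()
#   conn.recv(1024)                      # consume the client's "Hello server!" greeting
#   with open("file_to_send", "rb") as in_file:
#       conn.sendall(in_file.read())
#   conn.close()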
| 694 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
_lowerCAmelCase : Any = 4
_lowerCAmelCase : str = 3
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
pass
def _A ( snake_case__ : List[str] ):
for shard in shards:
for i in range(snake_case__ ):
yield {"i": i, "shard": shard}
def _A ( ):
snake_case__ : Dict = int(os.environ['''RANK'''] )
snake_case__ : str = int(os.environ['''WORLD_SIZE'''] )
snake_case__ : List[Any] = ArgumentParser()
parser.add_argument('''--streaming''' , type=snake_case__ )
parser.add_argument('''--local_rank''' , type=snake_case__ )
parser.add_argument('''--num_workers''' , type=snake_case__ , default=0 )
snake_case__ : List[str] = parser.parse_args()
snake_case__ : Any = args.streaming
snake_case__ : Any = args.num_workers
snake_case__ : Optional[int] = {'''shards''': [f'''shard_{shard_idx}''' for shard_idx in range(snake_case__ )]}
snake_case__ : Union[str, Any] = IterableDataset.from_generator(snake_case__ , gen_kwargs=snake_case__ )
if not streaming:
snake_case__ : Any = Dataset.from_list(list(snake_case__ ) )
snake_case__ : List[Any] = split_dataset_by_node(snake_case__ , rank=snake_case__ , world_size=snake_case__ )
snake_case__ : str = torch.utils.data.DataLoader(snake_case__ , num_workers=snake_case__ )
snake_case__ : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
snake_case__ : Tuple = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
snake_case__ : Dict = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
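# The script reads RANK and WORLD_SIZE from the environment, so it is meant to run
# under a distributed launcher; a hypothetical two-process invocation:
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2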
| 718 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
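    # Hedged example: the last definition of _A above shadows the earlier two and
    # computes interest at a nominal APR with daily compounding; illustrative values:
    #   print(_A(10_000.0, 0.05, 10))  # $10,000 principal, 5% APR, 10 years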
| 694 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'trocr'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=1024 , lowerCamelCase=12 , lowerCamelCase=16 , lowerCamelCase=4096 , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = vocab_size
snake_case__ : List[Any] = d_model
snake_case__ : Optional[int] = decoder_layers
snake_case__ : List[str] = decoder_attention_heads
snake_case__ : List[str] = decoder_ffn_dim
snake_case__ : Optional[int] = activation_function
snake_case__ : Any = max_position_embeddings
snake_case__ : List[Any] = dropout
snake_case__ : Any = attention_dropout
snake_case__ : Tuple = activation_dropout
snake_case__ : Optional[int] = init_std
snake_case__ : int = decoder_layerdrop
snake_case__ : Any = use_cache
snake_case__ : Optional[int] = scale_embedding
snake_case__ : Optional[Any] = use_learned_position_embeddings
snake_case__ : List[str] = layernorm_embedding
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , decoder_start_token_id=lowerCamelCase , **lowerCamelCase , )
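# Hedged usage sketch (upstream this class is exported as TrOCRConfig; the override
# values are illustrative):
#   config = TrOCRConfig(d_model=512, decoder_layers=6, decoder_attention_heads=8)
#   print(config.hidden_size)  # the attribute_map above routes hidden_size -> d_model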
| 719 |
'''simple docstring'''
from math import isqrt
def _A ( snake_case__ : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def _A ( snake_case__ : int = 10**6 ):
snake_case__ : str = 0
snake_case__ : List[str] = 1
snake_case__ : str = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
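# The candidate update exploits the cube-difference identity
# (k + 1)**3 - k**3 == 3*k*k + 3*k + 1, whose consecutive increments are exactly
# 6*k; the candidates therefore run 7, 19, 37, 61, ... and only these values can
# ever be a prime difference of consecutive cubes.
#   (worked check) all((k + 1) ** 3 - k ** 3 == 3 * k * k + 3 * k + 1 for k in range(1, 100))  # True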
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 0 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase = "cpu" , lowerCamelCase = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
snake_case__ : Union[str, Any] = device
snake_case__ : int = CLIPTokenizerFast.from_pretrained(lowerCamelCase )
snake_case__ : str = [0.48_145_466, 0.4_578_275, 0.40_821_073]
snake_case__ : Dict = [0.26_862_954, 0.26_130_258, 0.27_577_711]
snake_case__ : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
snake_case__ : Optional[Any] = torchvision.transforms.Resize(224 )
snake_case__ : int = torchvision.transforms.CenterCrop(224 )
def lowercase__ ( self , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Any = self.resize(lowerCamelCase )
snake_case__ : str = self.center_crop(lowerCamelCase )
snake_case__ : List[Any] = self.normalize(lowerCamelCase )
return images
def __call__( self , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : int = self.tokenizer(text=lowerCamelCase , **lowerCamelCase )
snake_case__ : List[Any] = self.preprocess_img(lowerCamelCase )
snake_case__ : Tuple = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase=10 , lowerCamelCase=0.01 , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase="image" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , ) -> None:
"""simple docstring"""
super().__init__()
snake_case__ : Any = None
snake_case__ : Any = device if device else get_device()
if vqgan:
snake_case__ : Tuple = vqgan
else:
snake_case__ : List[Any] = load_vqgan(self.device , conf_path=lowerCamelCase , ckpt_path=lowerCamelCase )
self.vqgan.eval()
if clip:
snake_case__ : int = clip
else:
snake_case__ : List[Any] = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
snake_case__ : str = ProcessorGradientFlow(device=self.device )
snake_case__ : Dict = iterations
snake_case__ : str = lr
snake_case__ : Any = log
snake_case__ : str = make_grid
snake_case__ : str = return_val
snake_case__ : Union[str, Any] = quantize
snake_case__ : Optional[Any] = self.vqgan.decoder.z_shape
def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=5 , lowerCamelCase=True ) -> Tuple:
"""simple docstring"""
snake_case__ : int = []
if output_path is None:
snake_case__ : List[Any] = '''./animation.gif'''
if input_path is None:
snake_case__ : Optional[int] = self.save_path
snake_case__ : Optional[Any] = sorted(glob(input_path + '''/*''' ) )
if not len(lowerCamelCase ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(lowerCamelCase ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
snake_case__ : int = total_duration / len(lowerCamelCase )
snake_case__ : List[str] = [frame_duration] * len(lowerCamelCase )
if extend_frames:
snake_case__ : Optional[Any] = 1.5
snake_case__ : str = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(lowerCamelCase ) )
imageio.mimsave(lowerCamelCase , lowerCamelCase , duration=lowerCamelCase )
print(f'''gif saved to {output_path}''' )
def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=None ) -> Any:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
snake_case__ : Tuple = preprocess(Image.open(lowerCamelCase ) , target_image_size=256 ).to(self.device )
snake_case__ : Optional[int] = preprocess_vqgan(lowerCamelCase )
snake_case__ : Optional[Any] = self.vqgan.encode(lowerCamelCase )
return z
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.latent.detach().requires_grad_()
snake_case__ : Optional[int] = base_latent + transform_vector
if self.quantize:
snake_case__ : Optional[Any] = self.vqgan.quantize(lowerCamelCase )
else:
snake_case__ : Optional[Any] = trans_latent
return self.vqgan.decode(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) -> str:
"""simple docstring"""
snake_case__ : List[str] = self.clip_preprocessor(text=lowerCamelCase , images=lowerCamelCase , return_tensors='''pt''' , padding=lowerCamelCase )
snake_case__ : List[Any] = self.clip(**lowerCamelCase )
snake_case__ : int = clip_outputs.logits_per_image
if weights is not None:
snake_case__ : Dict = similarity_logits * weights
return similarity_logits.sum()
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : List[str] = self._get_clip_similarity(pos_prompts['''prompts'''] , lowerCamelCase , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
snake_case__ : Optional[Any] = self._get_clip_similarity(neg_prompts['''prompts'''] , lowerCamelCase , weights=neg_prompts['''weights'''] )
else:
snake_case__ : int = torch.tensor([1] , device=self.device )
snake_case__ : str = -torch.log(lowerCamelCase ) + torch.log(lowerCamelCase )
return loss
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : Dict = torch.randn_like(self.latent , requires_grad=lowerCamelCase , device=self.device )
snake_case__ : List[str] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
snake_case__ : Optional[Any] = self._add_vector(lowerCamelCase )
snake_case__ : int = loop_post_process(lowerCamelCase )
snake_case__ : List[Any] = self._get_CLIP_loss(lowerCamelCase , lowerCamelCase , lowerCamelCase )
print('''CLIP loss''' , lowerCamelCase )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=lowerCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
wandb.init(reinit=lowerCamelCase , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
snake_case__ : List[str] = Image.open(lowerCamelCase )
snake_case__ : Union[str, Any] = image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(lowerCamelCase ) )
def lowercase__ ( self , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
if not prompts:
return []
snake_case__ : str = []
snake_case__ : Dict = []
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : Dict = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(lowerCamelCase , (tuple, list) ):
snake_case__ : Union[str, Any] = prompt[0]
snake_case__ : Dict = float(prompt[1] )
elif ":" in prompt:
snake_case__ : str = prompt.split(''':''' )
snake_case__ : Optional[int] = float(lowerCamelCase )
else:
snake_case__ : str = prompt
snake_case__ : Tuple = 1.0
processed_prompts.append(lowerCamelCase )
weights.append(lowerCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCamelCase , device=self.device ),
}
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=None , ) -> Optional[int]:
"""simple docstring"""
if image_path:
snake_case__ : List[str] = self._get_latent(lowerCamelCase )
else:
snake_case__ : Tuple = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCamelCase , lowerCamelCase , lowerCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
snake_case__ : Union[str, Any] = self.process_prompts(lowerCamelCase )
snake_case__ : List[Any] = self.process_prompts(lowerCamelCase )
if save_final and save_path is None:
snake_case__ : Dict = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(lowerCamelCase ):
os.makedirs(lowerCamelCase )
else:
snake_case__ : str = save_path + '''_''' + get_timestamp()
os.makedirs(lowerCamelCase )
snake_case__ : Tuple = save_path
snake_case__ : Dict = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(lowerCamelCase ) )
snake_case__ : int = loop_post_process(lowerCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCamelCase , lowerCamelCase , lowerCamelCase ) ):
if show_intermediate:
show_pil(lowerCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(lowerCamelCase )} )
if show_final:
show_pil(lowerCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
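# Hedged usage sketch (the class name was mangled to `snake_case` above; upstream it
# is a VQGAN-CLIP image editor, and the method names here are assumptions):
#   editor = VQGANCLIPEditor(iterations=25, lr=0.05)          # class name is hypothetical
#   for frame in editor.generate("a smiling face:1.0", image_path="face.png"):
#       pass                                                  # one decoded PIL image per CLIP step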
| 720 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = fa_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase )
return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
| 694 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase : Optional[Any] = "RegNetConfig"
# Base docstring
_lowerCAmelCase : Union[str, Any] = "facebook/regnet-y-040"
_lowerCAmelCase : Optional[Any] = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_lowerCAmelCase : str = "facebook/regnet-y-040"
_lowerCAmelCase : List[Any] = "tabby, tabby cat"
_lowerCAmelCase : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase = 3 , lowerCamelCase = 1 , lowerCamelCase = 1 , lowerCamelCase = "relu" , **lowerCamelCase , ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
snake_case__ : Dict = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
snake_case__ : List[str] = tf.keras.layers.ConvaD(
filters=lowerCamelCase , kernel_size=lowerCamelCase , strides=lowerCamelCase , padding='''VALID''' , groups=lowerCamelCase , use_bias=lowerCamelCase , name='''convolution''' , )
snake_case__ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
snake_case__ : int = ACTaFN[activation] if activation is not None else tf.identity
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Tuple = self.convolution(self.padding(lowerCamelCase ) )
snake_case__ : List[str] = self.normalization(lowerCamelCase )
snake_case__ : Optional[int] = self.activation(lowerCamelCase )
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = config.num_channels
snake_case__ : Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : Any = shape_list(lowerCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
snake_case__ : List[Any] = tf.transpose(lowerCamelCase , perm=(0, 2, 3, 1) )
snake_case__ : Optional[int] = self.embedder(lowerCamelCase )
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase = 2 , **lowerCamelCase ) -> Dict:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : List[Any] = tf.keras.layers.ConvaD(
filters=lowerCamelCase , kernel_size=1 , strides=lowerCamelCase , use_bias=lowerCamelCase , name='''convolution''' )
snake_case__ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = False ) -> tf.Tensor:
"""simple docstring"""
return self.normalization(self.convolution(lowerCamelCase ) , training=lowerCamelCase )
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase , name='''pooler''' )
snake_case__ : Union[str, Any] = [
tf.keras.layers.ConvaD(filters=lowerCamelCase , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=lowerCamelCase , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def lowercase__ ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = self.pooler(lowerCamelCase )
for layer_module in self.attention:
snake_case__ : Optional[int] = layer_module(lowerCamelCase )
snake_case__ : Optional[Any] = hidden_state * pooled
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = in_channels != out_channels or stride != 1
snake_case__ : Dict = max(1 , out_channels // config.groups_width )
snake_case__ : int = (
TFRegNetShortCut(lowerCamelCase , stride=lowerCamelCase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
snake_case__ : List[Any] = [
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=lowerCamelCase , name='''layer.2''' ),
]
snake_case__ : Any = ACTaFN[config.hidden_act]
def lowercase__ ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = hidden_state
for layer_module in self.layers:
snake_case__ : List[Any] = layer_module(lowerCamelCase )
snake_case__ : int = self.shortcut(lowerCamelCase )
hidden_state += residual
snake_case__ : List[Any] = self.activation(lowerCamelCase )
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , **lowerCamelCase ) -> int:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : int = in_channels != out_channels or stride != 1
snake_case__ : Optional[Any] = max(1 , out_channels // config.groups_width )
snake_case__ : Tuple = (
TFRegNetShortCut(lowerCamelCase , stride=lowerCamelCase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
snake_case__ : List[str] = [
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(lowerCamelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=lowerCamelCase , name='''layer.3''' ),
]
snake_case__ : Optional[Any] = ACTaFN[config.hidden_act]
def lowercase__ ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Dict = hidden_state
for layer_module in self.layers:
snake_case__ : List[str] = layer_module(lowerCamelCase )
snake_case__ : int = self.shortcut(lowerCamelCase )
hidden_state += residual
snake_case__ : Union[str, Any] = self.activation(lowerCamelCase )
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 2 , lowerCamelCase = 2 , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : str = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
snake_case__ : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase , lowerCamelCase , lowerCamelCase , stride=lowerCamelCase , name='''layers.0''' ),
*[layer(lowerCamelCase , lowerCamelCase , lowerCamelCase , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowercase__ ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
for layer_module in self.layers:
snake_case__ : int = layer_module(lowerCamelCase )
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
snake_case__ : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase , lowerCamelCase , lowerCamelCase , depth=lowerCamelCase , name=f'''stages.{i+1}''' ) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True ) -> TFBaseModelOutputWithNoAttention:
"""simple docstring"""
snake_case__ : int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
snake_case__ : Union[str, Any] = hidden_states + (hidden_state,)
snake_case__ : int = stage_module(lowerCamelCase )
if output_hidden_states:
snake_case__ : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase , hidden_states=lowerCamelCase )
@keras_serializable
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
_lowerCAmelCase = RegNetConfig
def __init__( self , lowerCamelCase , **lowerCamelCase ) -> Any:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : str = config
snake_case__ : Optional[Any] = TFRegNetEmbeddings(lowerCamelCase , name='''embedder''' )
snake_case__ : int = TFRegNetEncoder(lowerCamelCase , name='''encoder''' )
snake_case__ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase , name='''pooler''' )
@unpack_inputs
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
snake_case__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : Any = self.embedder(lowerCamelCase , training=lowerCamelCase )
snake_case__ : int = self.encoder(
lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase )
snake_case__ : int = encoder_outputs[0]
snake_case__ : int = self.pooler(lowerCamelCase )
        # Change to NCHW output format to have uniformity in the modules
snake_case__ : str = tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) )
snake_case__ : Any = tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
snake_case__ : Optional[Any] = tuple([tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase , pooler_output=lowerCamelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = RegNetConfig
_lowerCAmelCase = 'regnet'
_lowerCAmelCase = 'pixel_values'
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_lowerCAmelCase : Dict = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase : Any = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , __lowerCamelCase , )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> Any:
"""simple docstring"""
super().__init__(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
snake_case__ : Tuple = TFRegNetMainLayer(lowerCamelCase , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
"""simple docstring"""
snake_case__ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : str = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : int = self.regnet(
pixel_values=lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __lowerCamelCase , )
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
super().__init__(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
snake_case__ : Dict = config.num_labels
snake_case__ : int = TFRegNetMainLayer(lowerCamelCase , name='''regnet''' )
# classification head
snake_case__ : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase__ ( self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
"""simple docstring"""
snake_case__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : int = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : Dict = self.regnet(
lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase )
snake_case__ : Any = outputs.pooler_output if return_dict else outputs[1]
snake_case__ : str = self.classifier[0](lowerCamelCase )
snake_case__ : Any = self.classifier[1](lowerCamelCase )
snake_case__ : Any = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase , logits=lowerCamelCase )
if not return_dict:
snake_case__ : Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states )
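# Hedged usage sketch (checkpoint name taken from _IMAGE_CLASS_CHECKPOINT above; under
# the upstream, un-mangled names the class is TFRegNetForImageClassification):
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   outputs = model(tf.random.uniform((1, 3, 224, 224)))
#   print(outputs.logits.shape)  # (1, num_labels)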
| 721 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 42
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
snake_case__ : str = timestep
if not torch.is_tensor(lowerCamelCase ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[Any] = timesteps[None].to(sample.device )
snake_case__ : Any = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
snake_case__ : Tuple = self.time_mlp(lowerCamelCase )
else:
snake_case__ : Union[str, Any] = timestep_embed[..., None]
snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case__ : List[Any] = ()
for downsample_block in self.down_blocks:
snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case__ : str = down_block_res_samples[-1:]
snake_case__ : int = down_block_res_samples[:-1]
snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase )
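# Hedged usage sketch (under the upstream, un-mangled names this class is diffusers'
# UNet1DModel; the shapes follow the assumed defaults in_channels=2, sample_size=65536):
#   model = UNet1DModel(sample_size=65536)
#   noisy = torch.randn(1, 2, 65536)             # (batch, channels, length)
#   pred = model(noisy, timestep=10).sample      # same shape as the input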
| 694 | 0 |
'''simple docstring'''
import math
def UpperCamelCase( UpperCAmelCase_ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCamelCase( UpperCAmelCase_ = 0.1 ):
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(UpperCAmelCase_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
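    # Hedged example: under the upstream name solution(ratio), this appears to be
    # Project Euler 58, returning the spiral side length at which the share of
    # primes along both diagonals first drops below `ratio`:
    #   print(_A(0.10))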
| 695 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return [sentence[i : i + ngram_size] for i in range(len(UpperCAmelCase_ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
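    # Hedged example (assuming the un-mangled signature is create_ngram(sentence, ngram_size)):
    #   _A("I am a sentence", 2)  # -> ['I ', ' a', 'am', 'm ', ...], the character bigrams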
| 695 | 1 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 695 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : int = sin(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = cos(UpperCAmelCase_ )
UpperCAmelCase : Dict = _sin / (2 * q_factor)
UpperCAmelCase : Tuple = (1 - _cos) / 2
UpperCAmelCase : Dict = 1 - _cos
UpperCAmelCase : Union[str, Any] = 1 + alpha
UpperCAmelCase : List[str] = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha
UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[str] = cos(UpperCAmelCase_ )
UpperCAmelCase : Tuple = _sin / (2 * q_factor)
UpperCAmelCase : int = (1 + _cos) / 2
UpperCAmelCase : List[Any] = -1 - _cos
UpperCAmelCase : Optional[int] = 1 + alpha
UpperCAmelCase : Any = -2 * _cos
UpperCAmelCase : str = 1 - alpha
UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : List[Any] = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : Any = _sin / (2 * q_factor)
UpperCAmelCase : Optional[Any] = _sin / 2
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : int = -ba
UpperCAmelCase : List[str] = 1 + alpha
UpperCAmelCase : int = -2 * _cos
UpperCAmelCase : Optional[int] = 1 - alpha
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : Tuple = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : int = 1 - alpha
UpperCAmelCase : Dict = -2 * _cos
UpperCAmelCase : Any = 1 + alpha
UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Optional[int] = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase : Union[str, Any] = 10 ** (gain_db / 40)
UpperCAmelCase : int = 1 + alpha * big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha * big_a
UpperCAmelCase : Tuple = 1 + alpha / big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : int = 1 - alpha / big_a
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Dict = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : Any = cos(UpperCAmelCase_ )
UpperCAmelCase : str = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : List[str] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : List[Any] = big_a * (pmc + aaa)
UpperCAmelCase : Optional[int] = 2 * big_a * mpc
UpperCAmelCase : Optional[int] = big_a * (pmc - aaa)
UpperCAmelCase : str = ppmc + aaa
UpperCAmelCase : int = -2 * pmpc
UpperCAmelCase : int = ppmc - aaa
UpperCAmelCase : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : List[Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : Optional[Any] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : Dict = big_a * (ppmc + aaa)
UpperCAmelCase : List[str] = -2 * big_a * pmpc
UpperCAmelCase : int = big_a * (ppmc - aaa)
UpperCAmelCase : Dict = pmc + aaa
UpperCAmelCase : Optional[int] = 2 * mpc
UpperCAmelCase : int = pmc - aaa
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
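# Hedged usage sketch: every maker above follows the same pattern (compute biquad
# coefficients, load them into an IIRFilter(2)). Under the upstream names the first
# one is make_lowpass; applying it sample-by-sample would look like:
#   filt = make_lowpass(1_000, 48_000)          # 1 kHz cutoff at a 48 kHz sample rate
#   out = [filt.process(s) for s in samples]    # `samples`: any iterable of floats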
| 695 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowercase__ = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
UpperCAmelCase : Any = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
UpperCAmelCase : Tuple = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : str=None ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
UpperCAmelCase : Optional[Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
UpperCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
UpperCAmelCase : Union[str, Any] = black.format_str(lowercase_ , mode=lowercase_ )
UpperCAmelCase : List[str] = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(lowercase_ , 'w' , newline='\n' ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , 'r' ) as f:
self.assertTrue(f.read() , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
UpperCAmelCase : List[Any] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Any:
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , lowercase_ ) , )
# Copy consistency with a really long name
UpperCAmelCase : Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , lowercase_ , overwrite_result=re.sub('DDPM' , 'Test' , lowercase_ ) , )
| 695 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class A_ ( Generic[T] ):
'''simple docstring'''
UpperCAmelCase_ : deque[T] # Cache store of keys
UpperCAmelCase_ : set[T] # References of the keys in cache
UpperCAmelCase_ : int = 10 # Maximum capacity of cache
def __init__( self : List[Any] , lowercase_ : int ) -> None:
UpperCAmelCase : Any = deque()
UpperCAmelCase : Dict = set()
if not n:
UpperCAmelCase : Optional[int] = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
UpperCAmelCase : str = n
def UpperCAmelCase_ ( self : List[str] , lowercase_ : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCAmelCase : Optional[Any] = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> None:
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Union[str, Any] ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
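# For comparison, a minimal O(1) variant of the same eviction policy built on OrderedDict
# (a sketch; `lru_refer` is a hypothetical helper, not part of the class above):
from collections import OrderedDict

def lru_refer(cache: OrderedDict, key, capacity: int = 4) -> None:
    if key in cache:
        cache.move_to_end(key, last=False)  # most recently used entries live at the front
    else:
        if len(cache) == capacity:
            cache.popitem(last=True)  # evict the least recently used key from the back
        cache[key] = None
        cache.move_to_end(key, last=False)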
| 695 | 1 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
lowercase__ = KEYMAP["up"]
lowercase__ = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
if os.name == "nt":
import msvcrt
UpperCAmelCase : List[str] = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
# Read the keystroke
UpperCAmelCase : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
UpperCAmelCase : List[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
UpperCAmelCase : List[Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(UpperCAmelCase_ )
if ord(UpperCAmelCase_ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_26 ) )
UpperCAmelCase : Any = chr(KEYMAP['esc'] )
except KeyError:
UpperCAmelCase : List[Any] = cha[1]
else:
UpperCAmelCase : Union[str, Any] = ch.decode(UpperCAmelCase_ )
else:
UpperCAmelCase : int = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
UpperCAmelCase : Optional[Any] = sys.stdin.fileno()
UpperCAmelCase : Dict = termios.tcgetattr(UpperCAmelCase_ )
try:
tty.setraw(UpperCAmelCase_ )
UpperCAmelCase : int = sys.stdin.read(1 )
finally:
termios.tcsetattr(UpperCAmelCase_ , termios.TCSADRAIN , UpperCAmelCase_ )
return ch
def get_character():
    char = get_raw_chars()
if ord(UpperCAmelCase_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(UpperCAmelCase_ ) == KEYMAP["esc"]:
UpperCAmelCase : Dict = get_raw_chars()
if ord(UpperCAmelCase_ ) == KEYMAP["mod_int"]:
UpperCAmelCase : List[Any] = get_raw_chars()
if ord(UpperCAmelCase_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(UpperCAmelCase_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(UpperCAmelCase_ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
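# A hedged usage sketch: a blocking loop that maps the translated key codes back to
# names. It relies on the two helpers above keeping their upstream accelerate names,
# get_raw_chars() and get_character(); `wait_for_arrow_key` is a hypothetical helper.
def wait_for_arrow_key():
    while True:
        char = get_character()
        if char == KEYMAP["undefined"]:
            continue
        for name in ("up", "down", "right", "left"):
            if char == chr(KEYMAP[name]):
                return name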
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
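# A short usage sketch; the first call downloads the tokenizer files from the Hub:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tokenizer("Hello world", add_special_tokens=False).input_ids
#     assert tokenizer.decode(ids) == "Hello world"  # byte-level BPE round-trips exactly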
| 695 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
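# The _LazyModule registered above defers every submodule import until the attribute is
# first touched. A minimal sketch of the same idea via PEP 562 module-level __getattr__
# (illustrative only, not transformers' implementation):
#
#     import importlib
#     _lazy = {"tokenization_xlm_roberta": ["XLMRobertaTokenizer"]}
#     def __getattr__(name):
#         for module, names in _lazy.items():
#             if name in names:
#                 return getattr(importlib.import_module("." + module, __name__), name)
#         raise AttributeError(name)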
| 695 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
UpperCAmelCase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase_ : Dict = """transcriber"""
UpperCAmelCase_ : int = WhisperProcessor
UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = ["""audio"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
return self.model.generate(inputs=lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
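# The tool wraps a standard processor/model pair; equivalent direct usage as a sketch
# (`transcribe_waveform` is a hypothetical helper expecting 16 kHz mono float audio):
def transcribe_waveform(waveform):
    processor = WhisperProcessor.from_pretrained('openai/whisper-base')
    model = WhisperForConditionalGeneration.from_pretrained('openai/whisper-base')
    features = processor(waveform, sampling_rate=16_000, return_tensors='pt').input_features
    return processor.batch_decode(model.generate(inputs=features), skip_special_tokens=True)[0]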
| 695 | 1 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
lowercase__ = "path-to-your-trained-model"
lowercase__ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
lowercase__ = "A photo of sks dog in a bucket"
lowercase__ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
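# For reproducible samples, the pipeline also accepts a seeded generator (a sketch):
generator = torch.Generator("cuda").manual_seed(0)
seeded_image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
seeded_image.save("dog-bucket-seeded.png")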
| 695 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Any = num_mel_bins
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : int = dropout
UpperCAmelCase : int = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : str = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Tuple = max_source_positions
UpperCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Optional[int] = classifier_proj_size
UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : str = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Tuple = mask_feature_length
UpperCAmelCase : Optional[int] = mask_feature_min_masks
UpperCAmelCase : str = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features' )
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
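# A small usage sketch: a deliberately tiny config yields a randomly initialised model
# that is handy for smoke tests (the sizes below are hypothetical):
#
#     from transformers import WhisperConfig, WhisperForConditionalGeneration
#     tiny = WhisperConfig(encoder_layers=2, decoder_layers=2, d_model=64,
#                          encoder_attention_heads=2, decoder_attention_heads=2,
#                          encoder_ffn_dim=128, decoder_ffn_dim=128)
#     model = WhisperForConditionalGeneration(tiny)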
| 695 | 1 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict=99 , lowercase_ : Optional[Any]=13 , lowercase_ : Dict=16 , lowercase_ : Union[str, Any]=7 , lowercase_ : List[str]=True , lowercase_ : Optional[int]=True , lowercase_ : Optional[int]=True , lowercase_ : Optional[Any]=False , lowercase_ : Tuple=True , lowercase_ : Any=2 , lowercase_ : Union[str, Any]=32 , lowercase_ : Optional[int]=4 , lowercase_ : str=4 , lowercase_ : Optional[Any]=30 , lowercase_ : int=0 , lowercase_ : Optional[int]=1 , lowercase_ : str=2 , lowercase_ : Tuple=None , ) -> Optional[Any]:
UpperCAmelCase : List[str] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : Optional[int] = decoder_seq_length
# For common tests
UpperCAmelCase : Union[str, Any] = self.decoder_seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : Union[str, Any] = use_attention_mask
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = vocab_size
UpperCAmelCase : Tuple = d_model
UpperCAmelCase : Optional[Any] = d_model
UpperCAmelCase : int = decoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : Dict = decoder_ffn_dim
UpperCAmelCase : List[Any] = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = decoder_attention_heads
UpperCAmelCase : List[str] = eos_token_id
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : Dict = pad_token_id
UpperCAmelCase : List[str] = decoder_start_token_id
UpperCAmelCase : Optional[Any] = use_cache
UpperCAmelCase : List[str] = max_position_embeddings
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : List[Any] = decoder_seq_length
UpperCAmelCase : Optional[int] = 2
UpperCAmelCase : Any = 1
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_attention_mask:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCAmelCase : Any = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCAmelCase_ ( self : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , ) -> Tuple:
UpperCAmelCase : str = True
UpperCAmelCase : List[str] = TrOCRDecoder(config=lowercase_ ).to(lowercase_ ).eval()
UpperCAmelCase : List[str] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
UpperCAmelCase : Tuple = model(lowercase_ , use_cache=lowercase_ )
UpperCAmelCase : Union[str, Any] = model(lowercase_ )
UpperCAmelCase : Dict = model(lowercase_ , use_cache=lowercase_ )
self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) )
self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) + 1 )
UpperCAmelCase : Any = outputs['past_key_values']
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase : int = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
UpperCAmelCase : int = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Optional[Any] = model(lowercase_ )['last_hidden_state']
UpperCAmelCase : int = model(lowercase_ , past_key_values=lowercase_ )['last_hidden_state']
# select random slice
UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Optional[Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
UpperCAmelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_torch
class A_ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
UpperCAmelCase_ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
UpperCAmelCase_ : Any = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Optional[int] = False
def UpperCAmelCase_ ( self : Any ) -> int:
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
pass
def UpperCAmelCase_ ( self : List[Any] ) -> str:
pass
def UpperCAmelCase_ ( self : str ) -> List[str]:
pass
def UpperCAmelCase_ ( self : List[str] ) -> Any:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Any:
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase_ ( self : Dict ) -> Dict:
pass
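# The past-key-values test above reduces to one invariant: stepping through tokens with a
# cache must reproduce the full forward pass. A generic sketch of that check
# (`check_cache_equivalence` is a hypothetical helper):
def check_cache_equivalence(model, input_ids, next_tokens, atol=1e-3):
    full = model(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
    past = model(input_ids, use_cache=True).past_key_values
    step = model(next_tokens, past_key_values=past).last_hidden_state
    return torch.allclose(full[:, -next_tokens.shape[-1] :], step, atol=atol)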
| 695 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
    mixed_precision = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def default_command_parser( parser , parents ):
    parser = parser.add_parser('default' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
parser.add_argument(
'--config_file' , default=UpperCAmelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def default_config_command( args ):
    config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
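# The same helper ships as a public utility; a usage sketch:
#
#     from accelerate.utils import write_basic_config
#     write_basic_config(mixed_precision="fp16", save_location="/tmp/accelerate_config.json")
#
# which mirrors what `accelerate config default --mixed_precision fp16` does from the CLI.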
| 695 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
UpperCAmelCase : Union[str, Any] = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
UpperCAmelCase : Optional[int] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert('RGB' )
return image
def create_rename_keys( config ):
UpperCAmelCase : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config( model_name ):
UpperCAmelCase : Optional[Any] = 3_64 if 'coco' in model_name else 2_24
UpperCAmelCase : Optional[int] = InstructBlipVisionConfig(image_size=UpperCAmelCase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
        UpperCAmelCase : List[Any] = T5Config.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
        UpperCAmelCase : List[str] = T5Config.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
UpperCAmelCase : List[str] = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_20_01 ).to_dict()
elif "vicuna-13b" in model_name:
UpperCAmelCase : int = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_20_01 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
UpperCAmelCase : Tuple = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
UpperCAmelCase : List[str] = InstructBlipConfig(vision_config=UpperCAmelCase_ , text_config=UpperCAmelCase_ , qformer_config=UpperCAmelCase_ )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    qformer_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
        UpperCAmelCase : Optional[Any] = T5TokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
UpperCAmelCase : Any = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
UpperCAmelCase , UpperCAmelCase : Dict = get_blipa_config(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = InstructBlipForConditionalGeneration(UpperCAmelCase_ ).eval()
UpperCAmelCase : Optional[Any] = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
UpperCAmelCase , UpperCAmelCase : Tuple = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
UpperCAmelCase : Tuple = 'cuda:1' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase : Dict = 'cuda:2' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = load_model_and_preprocess(
name=UpperCAmelCase_ , model_type=UpperCAmelCase_ , is_eval=UpperCAmelCase_ , device=UpperCAmelCase_ )
original_model.eval()
print('Done!' )
# update state dict keys
UpperCAmelCase : Any = original_model.state_dict()
UpperCAmelCase : Tuple = create_rename_keys(UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase : Optional[Any] = state_dict.pop(UpperCAmelCase_ )
if key.startswith('Qformer.bert' ):
UpperCAmelCase : Dict = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
UpperCAmelCase : Optional[Any] = key.replace('self' , 'attention' )
if "llm_proj" in key:
UpperCAmelCase : Dict = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
UpperCAmelCase : Tuple = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
UpperCAmelCase : str = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
UpperCAmelCase : List[Any] = key.replace('t5' , 'language' )
UpperCAmelCase : int = val
# read in qv biases
read_in_q_v_bias(UpperCAmelCase_ , UpperCAmelCase_ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = load_demo_image()
UpperCAmelCase : Optional[Any] = 'What is unusual about this image?'
# create processor
UpperCAmelCase : Optional[Any] = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
UpperCAmelCase : Optional[int] = InstructBlipProcessor(
image_processor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ , )
UpperCAmelCase : List[Any] = processor(images=UpperCAmelCase_ , text=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ )
# make sure processor creates exact same pixel values
UpperCAmelCase : Any = vis_processors['eval'](UpperCAmelCase_ ).unsqueeze(0 ).to(UpperCAmelCase_ )
UpperCAmelCase : Tuple = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCAmelCase_ )
original_model.to(UpperCAmelCase_ )
hf_model.to(UpperCAmelCase_ )
with torch.no_grad():
if "vicuna" in model_name:
UpperCAmelCase : Dict = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
UpperCAmelCase : List[Any] = hf_model(**UpperCAmelCase_ ).logits
else:
UpperCAmelCase : int = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
UpperCAmelCase : List[Any] = tokenizer('\n' , return_tensors='pt' ).input_ids.to(UpperCAmelCase_ )
UpperCAmelCase : Tuple = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 )
UpperCAmelCase : Optional[Any] = hf_model(**UpperCAmelCase_ , labels=UpperCAmelCase_ ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
UpperCAmelCase : int = 1E-4 if 'vicuna' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , UpperCAmelCase_ , atol=UpperCAmelCase_ )
print('Looks ok!' )
print('Generating with original model...' )
UpperCAmelCase : Tuple = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
UpperCAmelCase : Any = hf_model.generate(
**UpperCAmelCase_ , do_sample=UpperCAmelCase_ , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
UpperCAmelCase : Any = 2
print('Original generation:' , UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = processor.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
UpperCAmelCase : List[Any] = [text.strip() for text in output_text]
print('HF generation:' , UpperCAmelCase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCAmelCase_ )
hf_model.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
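    # Once converted and pushed, the checkpoint reloads through the regular transformers
    # API (a sketch using the hub ids targeted above):
    #
    #     processor = InstructBlipProcessor.from_pretrained('Salesforce/' + args.model_name)
    #     model = InstructBlipForConditionalGeneration.from_pretrained('Salesforce/' + args.model_name)
    #     inputs = processor(images=load_demo_image(), text='What is unusual about this image?', return_tensors='pt')
    #     print(processor.batch_decode(model.generate(**inputs, max_new_tokens=64), skip_special_tokens=True)[0])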
| 695 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
super().__init__(*lowercase_ , **lowercase_ )
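# A quick sketch showing the shim fires the FutureWarning on construction (the class name
# assumes the one given in the warning text above):
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         LayoutLMv2FeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)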
| 695 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(bert_config_file )
    print('Building PyTorch model from configuration: {}'.format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
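    # After conversion, the saved weights reload into a freshly built model (a sketch
    # with placeholder paths):
    #
    #     config = RemBertConfig.from_json_file("rembert_config.json")
    #     model = RemBertModel(config)
    #     model.load_state_dict(torch.load("pytorch_model.bin"))
    #     model.eval()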
| 695 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : str
UpperCAmelCase_ : str
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int]
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[Union[int, float]] = None
UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
                    'example_id': tf.int32,
                    'input_ids': tf.int32,
                    'attention_mask': tf.int32,
                    'token_type_ids': tf.int32,
},
                tf.int64,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def hans_convert_examples_to_features( examples , label_list , max_length , tokenizer , ):
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Any = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
lowercase__ = {
"hans": 3,
}
hans_processors = {
"hans": HansProcessor,
}
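# A toy sketch running the converter on a single hand-made example (the InputExample
# field names follow the upstream transformers script, since the dataclass fields above
# are placeholders):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     example = InputExample(guid="dev-1", text_a="The doctor saw the lawyer.",
#                            text_b="The lawyer saw the doctor.", label="neutral", pairID="0")
#     features = hans_convert_examples_to_features(
#         [example], ["contradiction", "entailment", "neutral"], 128, tokenizer)
#     print(features[0].label, len(features[0].input_ids))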
| 695 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
            config_dict.pop('feature_extractor_type' )
            config = Wav2Vec2FeatureExtractor(**config_dict )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def UpperCAmelCase_ ( self : int ) -> Tuple:
        class NewFeatureExtractor( _snake_case ):
            '''simple docstring'''

            is_local = True

        try:
            AutoConfig.register('custom' , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor )
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=False )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is enabled, we load from the Hub.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=True )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(feature_extractor , 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
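# Condensed sketch of the registration pattern exercised above (a summary, not part of the
# original test file):
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   extractor = AutoFeatureExtractor.from_pretrained(path_with_custom_config)  # -> CustomFeatureExtractor
# The finally-block cleanup matters because CONFIG_MAPPING and FEATURE_EXTRACTOR_MAPPING are
# process-global, so a leaked registration would bleed into other tests.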
| 695 |
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
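# Sanity checks (hedged: these values come from Project Euler #57, not from the original file):
# the eighth expansion 1393/985 is the first whose numerator has more digits than its
# denominator, so solution(8) == 1, and the documented answer for the default 1000 expansions
# is 153.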
| 695 | 1 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2  # indentation used when writing the generated json files
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
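# Reading this table: converting e.g. "wmt19-ru-en" stamps length_penalty=1.1 into the
# config.json generated below, while models absent from the table fall back to 1.0
# (an illustrative reading of the mapping, not an extra processing step).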
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
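# Worked example (hypothetical input; the four special tokens must be present so the restore
# step above can strip their '</w>' suffix):
#   rewrite_dict_keys({'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'le@@': 5, 'er': 7})
#   -> {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'le': 5, 'er</w>': 7}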
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"""Writing results to {pytorch_dump_folder_path}""")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = '.'
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"""using checkpoint {checkpoint_file}""")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt['args']['model'])

    src_lang = args['source_lang']
    tgt_lang = args['target_lang']

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"""dict.{src_lang}.txt""")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"""dict.{tgt_lang}.txt""")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-src.json')
    print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""")
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-tgt.json')
    print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""")
    with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding='utf-8') as fin:
        merges = fin.read()
    merges = re.sub(r' \d+$', '', merges, 0, re.M)  # remove frequency number
    print(f"""Generating {merges_file}""")
    with open(merges_file, 'w', encoding='utf-8') as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args["bpe"]}"""
    assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support tokenizer={args["tokenizer"]}"""

    model_conf = {
        'architectures': ['FSMTForConditionalGeneration'],
        'model_type': 'fsmt',
        'activation_dropout': args['activation_dropout'],
        'activation_function': 'relu',
        'attention_dropout': args['attention_dropout'],
        'd_model': args['decoder_embed_dim'],
        'dropout': args['dropout'],
        'init_std': 0.02,
        'max_position_embeddings': args['max_source_positions'],
        'num_hidden_layers': args['encoder_layers'],
        'src_vocab_size': src_vocab_size,
        'tgt_vocab_size': tgt_vocab_size,
        'langs': [src_lang, tgt_lang],
        'encoder_attention_heads': args['encoder_attention_heads'],
        'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
        'encoder_layerdrop': args['encoder_layerdrop'],
        'encoder_layers': args['encoder_layers'],
        'decoder_attention_heads': args['decoder_attention_heads'],
        'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
        'decoder_layerdrop': args['decoder_layerdrop'],
        'decoder_layers': args['decoder_layers'],
        'bos_token_id': 0,
        'pad_token_id': 1,
        'eos_token_id': 2,
        'is_encoder_decoder': True,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_all_embeddings'],
    }

    # good hparam defaults to start with
    model_conf['num_beams'] = 5
    model_conf['early_stopping'] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
    else:
        model_conf['length_penalty'] = 1.0

    print(f"""Generating {fsmt_model_config_file}""")
    with open(fsmt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        'langs': [src_lang, tgt_lang],
        'model_max_length': 10_24,
        'do_lower_case': do_lower_case,
    }

    print(f"""Generating {fsmt_tokenizer_config_file}""")
    with open(fsmt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt['models'][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        'model.model',
        'model.encoder.version',
        'model.decoder.version',
        'model.encoder_embed_tokens.weight',
        'model.decoder_embed_tokens.weight',
        'model.encoder.embed_positions._float_tensor',
        'model.decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"""Generating {pytorch_weights_dump_path}""")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')
    print('\nLast step is to upload the files to s3')
    print(f"""cd {data_root}""")
    print(f"""transformers-cli upload {model_dir}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
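# Example invocation (hypothetical paths; the checkpoint must sit in a fairseq dump directory
# next to its dict.{src}.txt / dict.{tgt}.txt files and bpecodes):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /data/wmt19.ru-en.ensemble/model4.pt \
#       --pytorch_dump_folder_path /data/dumps/wmt19-ru-en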
| 695 |
'''simple docstring'''
def solution(power: int = 10_00) -> int:
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
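# Quick check (hedged: taken from the Project Euler #16 statement, not the original file):
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.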
| 695 | 1 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError('math domain error')
    if num > 171.5:
        raise OverflowError('math range error')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
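# Illustrative identities (assumptions from standard gamma-function facts, not original code):
# gamma(5) == 24.0, since gamma(n) == (n - 1)! for positive integers, and
# gamma(1.5) == 0.5 * sqrt(pi) via the recurrence gamma(x) == (x - 1) * gamma(x - 1).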
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f'''gamma({num}) = {gamma(num)}''')
        print("\nEnter 0 to exit...")
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class BlipaVisionConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = """blip_2_vision_model"""

    def __init__(
        self,
        hidden_size=1_408,
        intermediate_size=6_144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
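# Usage sketch (hypothetical, mirroring the upstream BLIP-2 docs): the model_type check above
# is what lets the vision settings be peeled straight out of a full BLIP-2 checkpoint, e.g.
#   vision_cfg = BlipaVisionConfig.from_pretrained("salesforce/blip2-opt-2.7b")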
class BlipaQFormerConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = """blip_2_qformer"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1_408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class BlipaConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = """blip-2"""
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: BlipaVisionConfig,
        qformer_config: BlipaQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
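# Assembly sketch (an assumption based on the classmethod above; OPTConfig would additionally
# need to be imported for this to run):
#   config = BlipaConfig.from_vision_qformer_text_configs(
#       BlipaVisionConfig(), BlipaQFormerConfig(), OPTConfig()
#   )
# After construction, config.qformer_config.encoder_hidden_size equals
# config.vision_config.hidden_size, as wired up in __init__ above.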
| 695 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = """wavlm"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
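# Worked example (derived from the defaults above): with conv_stride (5, 2, 2, 2, 2, 2, 2) the
# property returns 5 * 2**6 = 320, i.e. one encoder frame per 320 waveform samples, which is
# 20 ms of audio at a 16 kHz sampling rate.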
| 695 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']

        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
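# To execute these checks (hedged invocation; @slow tests are skipped unless enabled via the
# RUN_SLOW environment variable in the transformers test suite):
#   RUN_SLOW=1 pytest -k test_bleu_scores <path-to-this-test-file>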
| 695 | 1 |